2024-12-07 13:23:02,806 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca 2024-12-07 13:23:02,821 main DEBUG Took 0.012851 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging 2024-12-07 13:23:02,822 main DEBUG PluginManager 'Core' found 129 plugins 2024-12-07 13:23:02,822 main DEBUG PluginManager 'Level' found 0 plugins 2024-12-07 13:23:02,824 main DEBUG PluginManager 'Lookup' found 16 plugins 2024-12-07 13:23:02,825 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-07 13:23:02,834 main DEBUG PluginManager 'TypeConverter' found 26 plugins 2024-12-07 13:23:02,850 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-07 13:23:02,851 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-07 13:23:02,852 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-07 13:23:02,852 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-07 13:23:02,853 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-07 13:23:02,853 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-07 13:23:02,854 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-07 13:23:02,854 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-07 13:23:02,855 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-07 13:23:02,855 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-07 13:23:02,856 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-07 13:23:02,857 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-07 13:23:02,857 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-07 13:23:02,858 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 
2024-12-07 13:23:02,858 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-07 13:23:02,858 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-07 13:23:02,859 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-07 13:23:02,859 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-07 13:23:02,860 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-07 13:23:02,860 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-07 13:23:02,861 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-07 13:23:02,861 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-07 13:23:02,861 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-07 13:23:02,862 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-07 13:23:02,862 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-07 13:23:02,863 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger]. 2024-12-07 13:23:02,864 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-07 13:23:02,866 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin]. 2024-12-07 13:23:02,868 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root}) 2024-12-07 13:23:02,869 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout]. 
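The logger set assembled by createLoggers() above comes from per-package level overrides in the test's log4j2.properties. A minimal, hypothetical Java equivalent for a few of those levels, using Log4j 2's Configurator (illustration only, not the actual HBase test configuration):

import org.apache.logging.log4j.Level;
import org.apache.logging.log4j.core.config.Configurator;

// Illustrative only: programmatic equivalents of some of the logger levels
// reported by createLoggers() in the log above.
public final class TestLogLevelsSketch {
  public static void apply() {
    Configurator.setRootLevel(Level.INFO);                                    // root = INFO,Console
    Configurator.setLevel("org.apache.hadoop", Level.WARN);
    Configurator.setLevel("org.apache.hadoop.hbase", Level.DEBUG);
    Configurator.setLevel("org.apache.zookeeper", Level.ERROR);
    Configurator.setLevel("org.apache.hadoop.metrics2.util.MBeans", Level.ERROR);
    Configurator.setLevel("org.apache.hbase.thirdparty.io.netty.channel", Level.DEBUG);
  }
}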
2024-12-07 13:23:02,870 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null") 2024-12-07 13:23:02,870 main DEBUG PluginManager 'Converter' found 47 plugins 2024-12-07 13:23:02,880 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender]. 2024-12-07 13:23:02,883 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={}) 2024-12-07 13:23:02,885 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR 2024-12-07 13:23:02,886 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin]. 2024-12-07 13:23:02,886 main DEBUG createAppenders(={Console}) 2024-12-07 13:23:02,887 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca initialized 2024-12-07 13:23:02,887 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca 2024-12-07 13:23:02,888 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca OK. 2024-12-07 13:23:02,888 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1 2024-12-07 13:23:02,889 main DEBUG OutputStream closed 2024-12-07 13:23:02,889 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true 2024-12-07 13:23:02,889 main DEBUG Appender DefaultConsole-1 stopped with status true 2024-12-07 13:23:02,890 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@6404f418 OK 2024-12-07 13:23:02,969 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6 2024-12-07 13:23:02,972 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger 2024-12-07 13:23:02,973 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector 2024-12-07 13:23:02,974 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name= 2024-12-07 13:23:02,974 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory 2024-12-07 13:23:02,975 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter 2024-12-07 13:23:02,975 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper 2024-12-07 13:23:02,976 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j 2024-12-07 13:23:02,976 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl 2024-12-07 13:23:02,976 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans 2024-12-07 13:23:02,977 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase 2024-12-07 13:23:02,977 main DEBUG Registering 
MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop 2024-12-07 13:23:02,978 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers 2024-12-07 13:23:02,978 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices 2024-12-07 13:23:02,978 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig 2024-12-07 13:23:02,978 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel 2024-12-07 13:23:02,979 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore 2024-12-07 13:23:02,980 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console 2024-12-07 13:23:02,981 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-12-07 13:23:02,982 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-logging/target/hbase-logging-3.0.0-beta-2-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@6dab9b6d) with optional ClassLoader: null 2024-12-07 13:23:02,982 main DEBUG Shutdown hook enabled. Registering a new one. 2024-12-07 13:23:02,983 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@6dab9b6d] started OK. 2024-12-07T13:23:03,252 DEBUG [main {}] hbase.HBaseTestingUtil(323): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7ca13325-5db5-49d1-0537-37ce6f9e6872 2024-12-07 13:23:03,255 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED) 2024-12-07 13:23:03,255 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 
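The layout and appender built earlier are HBase's own HBaseTestAppender writing to SYSTEM_ERR with the pattern %d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n. A rough stand-in using a stock Log4j 2 ConsoleAppender with the same pattern and target (a sketch only; HBaseTestAppender itself carries extra test-specific settings such as the maxSize="1G" shown in its builder above):

import org.apache.logging.log4j.core.appender.ConsoleAppender;
import org.apache.logging.log4j.core.layout.PatternLayout;

// Sketch: a plain console appender approximating the pattern and target
// reported for the "Console" HBaseTestAppender in the log above.
public final class ConsoleAppenderSketch {
  public static ConsoleAppender build() {
    PatternLayout layout = PatternLayout.newBuilder()
        .withPattern("%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n")
        .build();
    return ConsoleAppender.newBuilder()
        .setName("Console")
        .setTarget(ConsoleAppender.Target.SYSTEM_ERR)
        .setLayout(layout)
        .build();
  }
}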
2024-12-07T13:23:03,263 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.regionserver.wal.TestLogRolling timeout: 13 mins 2024-12-07T13:23:03,291 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testSlowSyncLogRolling Thread=12, OpenFileDescriptor=287, MaxFileDescriptor=1048576, SystemLoadAverage=147, ProcessCount=11, AvailableMemoryMB=16547 2024-12-07T13:23:03,293 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-07T13:23:03,311 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7ca13325-5db5-49d1-0537-37ce6f9e6872/cluster_6a2d92fc-30de-1c20-275d-95db6869af63, deleteOnExit=true 2024-12-07T13:23:03,311 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-12-07T13:23:03,313 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7ca13325-5db5-49d1-0537-37ce6f9e6872/test.cache.data in system properties and HBase conf 2024-12-07T13:23:03,313 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7ca13325-5db5-49d1-0537-37ce6f9e6872/hadoop.tmp.dir in system properties and HBase conf 2024-12-07T13:23:03,314 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7ca13325-5db5-49d1-0537-37ce6f9e6872/hadoop.log.dir in system properties and HBase conf 2024-12-07T13:23:03,315 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7ca13325-5db5-49d1-0537-37ce6f9e6872/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-07T13:23:03,315 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7ca13325-5db5-49d1-0537-37ce6f9e6872/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-07T13:23:03,316 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-12-07T13:23:03,395 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable 2024-12-07T13:23:03,475 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-12-07T13:23:03,479 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7ca13325-5db5-49d1-0537-37ce6f9e6872/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-07T13:23:03,480 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7ca13325-5db5-49d1-0537-37ce6f9e6872/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-07T13:23:03,480 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7ca13325-5db5-49d1-0537-37ce6f9e6872/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-07T13:23:03,481 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7ca13325-5db5-49d1-0537-37ce6f9e6872/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-07T13:23:03,481 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7ca13325-5db5-49d1-0537-37ce6f9e6872/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-07T13:23:03,482 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7ca13325-5db5-49d1-0537-37ce6f9e6872/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-07T13:23:03,482 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7ca13325-5db5-49d1-0537-37ce6f9e6872/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-07T13:23:03,483 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7ca13325-5db5-49d1-0537-37ce6f9e6872/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-07T13:23:03,483 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7ca13325-5db5-49d1-0537-37ce6f9e6872/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-07T13:23:03,484 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7ca13325-5db5-49d1-0537-37ce6f9e6872/nfs.dump.dir in system properties and HBase conf 2024-12-07T13:23:03,484 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7ca13325-5db5-49d1-0537-37ce6f9e6872/java.io.tmpdir in system properties and HBase conf 2024-12-07T13:23:03,485 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7ca13325-5db5-49d1-0537-37ce6f9e6872/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-07T13:23:03,485 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7ca13325-5db5-49d1-0537-37ce6f9e6872/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-07T13:23:03,486 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7ca13325-5db5-49d1-0537-37ce6f9e6872/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-07T13:23:03,924 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-07T13:23:04,510 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties 2024-12-07T13:23:04,580 INFO [Time-limited test {}] log.Log(170): Logging initialized @2419ms to org.eclipse.jetty.util.log.Slf4jLog 2024-12-07T13:23:04,648 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-07T13:23:04,703 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-07T13:23:04,722 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-07T13:23:04,722 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-07T13:23:04,724 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-07T13:23:04,739 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-07T13:23:04,742 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@ddc8467{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7ca13325-5db5-49d1-0537-37ce6f9e6872/hadoop.log.dir/,AVAILABLE} 2024-12-07T13:23:04,743 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@70be1389{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-07T13:23:04,938 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@735fa16a{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7ca13325-5db5-49d1-0537-37ce6f9e6872/java.io.tmpdir/jetty-localhost-40063-hadoop-hdfs-3_4_1-tests_jar-_-any-17044641936855192207/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-07T13:23:04,946 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6c26a5a3{HTTP/1.1, (http/1.1)}{localhost:40063} 2024-12-07T13:23:04,946 INFO [Time-limited test {}] server.Server(415): Started @2787ms 2024-12-07T13:23:04,973 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-07T13:23:05,462 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-07T13:23:05,468 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-07T13:23:05,469 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-07T13:23:05,469 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-07T13:23:05,469 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-07T13:23:05,470 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@28778f0f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7ca13325-5db5-49d1-0537-37ce6f9e6872/hadoop.log.dir/,AVAILABLE} 2024-12-07T13:23:05,470 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@371e191c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-07T13:23:05,567 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7b07d1ba{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7ca13325-5db5-49d1-0537-37ce6f9e6872/java.io.tmpdir/jetty-localhost-37211-hadoop-hdfs-3_4_1-tests_jar-_-any-10245520030144733245/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-07T13:23:05,568 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@43e0a762{HTTP/1.1, (http/1.1)}{localhost:37211} 2024-12-07T13:23:05,568 INFO [Time-limited test {}] server.Server(415): Started @3408ms 2024-12-07T13:23:05,620 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-07T13:23:05,734 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-07T13:23:05,739 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-07T13:23:05,745 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-07T13:23:05,746 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-07T13:23:05,746 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-07T13:23:05,747 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@11effdcd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7ca13325-5db5-49d1-0537-37ce6f9e6872/hadoop.log.dir/,AVAILABLE} 2024-12-07T13:23:05,748 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2d48d695{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-07T13:23:05,852 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1bf97579{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7ca13325-5db5-49d1-0537-37ce6f9e6872/java.io.tmpdir/jetty-localhost-46419-hadoop-hdfs-3_4_1-tests_jar-_-any-339755232309809139/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-07T13:23:05,853 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@22b88bcb{HTTP/1.1, (http/1.1)}{localhost:46419} 2024-12-07T13:23:05,853 INFO [Time-limited test {}] server.Server(415): Started @3694ms 2024-12-07T13:23:05,856 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-07T13:23:06,993 WARN [Thread-97 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7ca13325-5db5-49d1-0537-37ce6f9e6872/cluster_6a2d92fc-30de-1c20-275d-95db6869af63/data/data4/current/BP-1090316692-172.17.0.3-1733577784020/current, will proceed with Du for space computation calculation, 2024-12-07T13:23:06,993 WARN [Thread-96 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7ca13325-5db5-49d1-0537-37ce6f9e6872/cluster_6a2d92fc-30de-1c20-275d-95db6869af63/data/data3/current/BP-1090316692-172.17.0.3-1733577784020/current, will proceed with Du for space computation calculation, 2024-12-07T13:23:07,018 WARN [Thread-81 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-07T13:23:07,060 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x5612c7cf87f47275 with lease ID 0xd6ec05bfc96564fc: Processing first storage report for DS-7c972b3a-16d7-447b-88aa-e9490bb58535 from datanode DatanodeRegistration(127.0.0.1:43507, datanodeUuid=37addea5-ca25-47c3-9bd4-4cccea3bd8fd, infoPort=37739, infoSecurePort=0, ipcPort=44239, storageInfo=lv=-57;cid=testClusterID;nsid=103105813;c=1733577784020) 2024-12-07T13:23:07,060 WARN [Thread-108 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7ca13325-5db5-49d1-0537-37ce6f9e6872/cluster_6a2d92fc-30de-1c20-275d-95db6869af63/data/data2/current/BP-1090316692-172.17.0.3-1733577784020/current, will proceed with Du for space computation calculation, 2024-12-07T13:23:07,060 WARN [Thread-107 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7ca13325-5db5-49d1-0537-37ce6f9e6872/cluster_6a2d92fc-30de-1c20-275d-95db6869af63/data/data1/current/BP-1090316692-172.17.0.3-1733577784020/current, will proceed with Du for space computation calculation, 2024-12-07T13:23:07,061 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x5612c7cf87f47275 with lease ID 0xd6ec05bfc96564fc: from storage DS-7c972b3a-16d7-447b-88aa-e9490bb58535 node DatanodeRegistration(127.0.0.1:43507, datanodeUuid=37addea5-ca25-47c3-9bd4-4cccea3bd8fd, infoPort=37739, infoSecurePort=0, ipcPort=44239, storageInfo=lv=-57;cid=testClusterID;nsid=103105813;c=1733577784020), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-07T13:23:07,062 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x5612c7cf87f47275 with lease ID 0xd6ec05bfc96564fc: Processing first storage report for DS-0146e7bf-14ac-4fc5-9f34-ce927de76dbf from datanode DatanodeRegistration(127.0.0.1:43507, datanodeUuid=37addea5-ca25-47c3-9bd4-4cccea3bd8fd, infoPort=37739, infoSecurePort=0, ipcPort=44239, storageInfo=lv=-57;cid=testClusterID;nsid=103105813;c=1733577784020) 2024-12-07T13:23:07,062 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x5612c7cf87f47275 with lease ID 0xd6ec05bfc96564fc: from storage DS-0146e7bf-14ac-4fc5-9f34-ce927de76dbf node DatanodeRegistration(127.0.0.1:43507, datanodeUuid=37addea5-ca25-47c3-9bd4-4cccea3bd8fd, infoPort=37739, infoSecurePort=0, ipcPort=44239, storageInfo=lv=-57;cid=testClusterID;nsid=103105813;c=1733577784020), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-07T13:23:07,077 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-07T13:23:07,082 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x5a7e6fed3728e887 with lease ID 0xd6ec05bfc96564fd: Processing first storage report for DS-0f51dbbd-c381-41fa-93a3-d43428451116 from datanode DatanodeRegistration(127.0.0.1:41333, datanodeUuid=b936396c-8d64-4fa5-af0b-7ee10e6fbff6, infoPort=35967, infoSecurePort=0, ipcPort=33463, storageInfo=lv=-57;cid=testClusterID;nsid=103105813;c=1733577784020) 2024-12-07T13:23:07,082 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x5a7e6fed3728e887 with lease ID 0xd6ec05bfc96564fd: from storage DS-0f51dbbd-c381-41fa-93a3-d43428451116 node DatanodeRegistration(127.0.0.1:41333, datanodeUuid=b936396c-8d64-4fa5-af0b-7ee10e6fbff6, infoPort=35967, infoSecurePort=0, ipcPort=33463, storageInfo=lv=-57;cid=testClusterID;nsid=103105813;c=1733577784020), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-07T13:23:07,082 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x5a7e6fed3728e887 with lease ID 0xd6ec05bfc96564fd: Processing first storage report for DS-af7a4329-7720-4c73-9f73-6fdbb11cf6ae from datanode DatanodeRegistration(127.0.0.1:41333, datanodeUuid=b936396c-8d64-4fa5-af0b-7ee10e6fbff6, infoPort=35967, infoSecurePort=0, ipcPort=33463, storageInfo=lv=-57;cid=testClusterID;nsid=103105813;c=1733577784020) 2024-12-07T13:23:07,083 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x5a7e6fed3728e887 with lease ID 0xd6ec05bfc96564fd: from storage DS-af7a4329-7720-4c73-9f73-6fdbb11cf6ae node DatanodeRegistration(127.0.0.1:41333, datanodeUuid=b936396c-8d64-4fa5-af0b-7ee10e6fbff6, infoPort=35967, infoSecurePort=0, ipcPort=33463, storageInfo=lv=-57;cid=testClusterID;nsid=103105813;c=1733577784020), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-07T13:23:07,181 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7ca13325-5db5-49d1-0537-37ce6f9e6872 2024-12-07T13:23:07,240 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7ca13325-5db5-49d1-0537-37ce6f9e6872/cluster_6a2d92fc-30de-1c20-275d-95db6869af63/zookeeper_0, clientPort=55327, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7ca13325-5db5-49d1-0537-37ce6f9e6872/cluster_6a2d92fc-30de-1c20-275d-95db6869af63/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7ca13325-5db5-49d1-0537-37ce6f9e6872/cluster_6a2d92fc-30de-1c20-275d-95db6869af63/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-07T13:23:07,249 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=55327 2024-12-07T13:23:07,262 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T13:23:07,265 INFO 
[Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T13:23:07,472 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41333 is added to blk_1073741825_1001 (size=7) 2024-12-07T13:23:07,473 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43507 is added to blk_1073741825_1001 (size=7) 2024-12-07T13:23:07,885 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:33723/user/jenkins/test-data/3eb2eb4e-7df3-dd45-b018-332cf885bdba with version=8 2024-12-07T13:23:07,885 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1139): Setting hbase.fs.tmp.dir to hdfs://localhost:33723/user/jenkins/test-data/3eb2eb4e-7df3-dd45-b018-332cf885bdba/hbase-staging 2024-12-07T13:23:07,966 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16 2024-12-07T13:23:08,223 INFO [Time-limited test {}] client.ConnectionUtils(128): master/c7c455b68129:0 server-side Connection retries=45 2024-12-07T13:23:08,232 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-07T13:23:08,233 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-07T13:23:08,238 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-07T13:23:08,238 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-07T13:23:08,238 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-07T13:23:08,377 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-12-07T13:23:08,430 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl 2024-12-07T13:23:08,438 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout 2024-12-07T13:23:08,442 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-07T13:23:08,465 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 63880 (auto-detected) 2024-12-07T13:23:08,466 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:03 (auto-detected) 2024-12-07T13:23:08,485 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:34945 2024-12-07T13:23:08,504 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:34945 connecting to ZooKeeper 
ensemble=127.0.0.1:55327 2024-12-07T13:23:08,829 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:349450x0, quorum=127.0.0.1:55327, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-07T13:23:08,832 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:34945-0x1000071ce200000 connected 2024-12-07T13:23:09,213 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T13:23:09,217 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T13:23:09,228 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:34945-0x1000071ce200000, quorum=127.0.0.1:55327, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-07T13:23:09,233 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:33723/user/jenkins/test-data/3eb2eb4e-7df3-dd45-b018-332cf885bdba, hbase.cluster.distributed=false 2024-12-07T13:23:09,261 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:34945-0x1000071ce200000, quorum=127.0.0.1:55327, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-07T13:23:09,267 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=34945 2024-12-07T13:23:09,267 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=34945 2024-12-07T13:23:09,268 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=34945 2024-12-07T13:23:09,268 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=34945 2024-12-07T13:23:09,272 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=34945 2024-12-07T13:23:09,380 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/c7c455b68129:0 server-side Connection retries=45 2024-12-07T13:23:09,382 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-07T13:23:09,382 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-07T13:23:09,382 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-07T13:23:09,382 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-07T13:23:09,382 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-07T13:23:09,385 INFO [Time-limited test {}] 
ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-07T13:23:09,387 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-07T13:23:09,388 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:45921 2024-12-07T13:23:09,390 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:45921 connecting to ZooKeeper ensemble=127.0.0.1:55327 2024-12-07T13:23:09,391 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T13:23:09,395 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T13:23:09,423 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:459210x0, quorum=127.0.0.1:55327, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-07T13:23:09,424 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:459210x0, quorum=127.0.0.1:55327, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-07T13:23:09,425 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:45921-0x1000071ce200001 connected 2024-12-07T13:23:09,429 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-07T13:23:09,438 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-07T13:23:09,441 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45921-0x1000071ce200001, quorum=127.0.0.1:55327, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-07T13:23:09,448 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45921-0x1000071ce200001, quorum=127.0.0.1:55327, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-07T13:23:09,448 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=45921 2024-12-07T13:23:09,450 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=45921 2024-12-07T13:23:09,450 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=45921 2024-12-07T13:23:09,451 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=45921 2024-12-07T13:23:09,452 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=45921 2024-12-07T13:23:09,465 DEBUG [M:0;c7c455b68129:34945 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;c7c455b68129:34945 2024-12-07T13:23:09,466 INFO [master/c7c455b68129:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/c7c455b68129,34945,1733577788070 
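Everything from the StartMiniClusterOption line at 13:23:03,293 through the master and regionserver RPC setup above is the standard HBaseTestingUtil mini-cluster bring-up. A stripped-down, hypothetical test skeleton using the same option values (numMasters=1, numRegionServers=1, numDataNodes=2, numZkServers=1); the real TestLogRolling setup differs, this only sketches the harness:

import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.StartMiniClusterOption;
import org.apache.hadoop.hbase.testclassification.LargeTests;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.ClassRule;
import org.junit.experimental.categories.Category;

// Hypothetical skeleton mirroring the options logged above
// (numMasters=1, numRegionServers=1, numDataNodes=2, numZkServers=1).
@Category({ LargeTests.class })
public class MiniClusterSketch {
  @ClassRule
  public static final HBaseClassTestRule CLASS_RULE =
      HBaseClassTestRule.forClass(MiniClusterSketch.class);

  private static final HBaseTestingUtil UTIL = new HBaseTestingUtil();

  @BeforeClass
  public static void setUp() throws Exception {
    StartMiniClusterOption option = StartMiniClusterOption.builder()
        .numMasters(1)
        .numRegionServers(1)
        .numDataNodes(2)
        .numZkServers(1)
        .build();
    UTIL.startMiniCluster(option);   // brings up DFS, ZooKeeper, master, regionserver as logged above
  }

  @AfterClass
  public static void tearDown() throws Exception {
    UTIL.shutdownMiniCluster();
  }
}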
2024-12-07T13:23:09,475 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34945-0x1000071ce200000, quorum=127.0.0.1:55327, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-07T13:23:09,476 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45921-0x1000071ce200001, quorum=127.0.0.1:55327, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-07T13:23:09,477 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:34945-0x1000071ce200000, quorum=127.0.0.1:55327, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/c7c455b68129,34945,1733577788070 2024-12-07T13:23:09,505 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34945-0x1000071ce200000, quorum=127.0.0.1:55327, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T13:23:09,505 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45921-0x1000071ce200001, quorum=127.0.0.1:55327, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-07T13:23:09,505 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45921-0x1000071ce200001, quorum=127.0.0.1:55327, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T13:23:09,506 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:34945-0x1000071ce200000, quorum=127.0.0.1:55327, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-07T13:23:09,507 INFO [master/c7c455b68129:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/c7c455b68129,34945,1733577788070 from backup master directory 2024-12-07T13:23:09,517 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45921-0x1000071ce200001, quorum=127.0.0.1:55327, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-07T13:23:09,517 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34945-0x1000071ce200000, quorum=127.0.0.1:55327, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/c7c455b68129,34945,1733577788070 2024-12-07T13:23:09,518 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34945-0x1000071ce200000, quorum=127.0.0.1:55327, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-07T13:23:09,518 WARN [master/c7c455b68129:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-12-07T13:23:09,518 INFO [master/c7c455b68129:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=c7c455b68129,34945,1733577788070 2024-12-07T13:23:09,520 INFO [master/c7c455b68129:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-12-07T13:23:09,521 INFO [master/c7c455b68129:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-12-07T13:23:09,582 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:33723/user/jenkins/test-data/3eb2eb4e-7df3-dd45-b018-332cf885bdba/hbase.id] with ID: 09dc5aa9-426b-4bb7-8341-4eb9b2474df6 2024-12-07T13:23:09,582 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:33723/user/jenkins/test-data/3eb2eb4e-7df3-dd45-b018-332cf885bdba/.tmp/hbase.id 2024-12-07T13:23:09,594 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43507 is added to blk_1073741826_1002 (size=42) 2024-12-07T13:23:09,594 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41333 is added to blk_1073741826_1002 (size=42) 2024-12-07T13:23:09,595 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:33723/user/jenkins/test-data/3eb2eb4e-7df3-dd45-b018-332cf885bdba/.tmp/hbase.id]:[hdfs://localhost:33723/user/jenkins/test-data/3eb2eb4e-7df3-dd45-b018-332cf885bdba/hbase.id] 2024-12-07T13:23:09,637 INFO [master/c7c455b68129:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T13:23:09,642 INFO [master/c7c455b68129:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-12-07T13:23:09,658 INFO [master/c7c455b68129:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 15ms. 
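At this point the master has persisted the cluster ID (09dc5aa9-426b-4bb7-8341-4eb9b2474df6) to hbase.id. For reference, a hypothetical client-side readback of that ID via the public Admin API (assumes a running cluster and its Configuration, for example from the UTIL instance in the earlier sketch):

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

// Hypothetical helper: read back the cluster ID the master wrote to hbase.id.
public final class ClusterIdCheckSketch {
  public static String readClusterId(Configuration conf) throws IOException {
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      return admin.getClusterMetrics().getClusterId();
    }
  }
}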
2024-12-07T13:23:09,686 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34945-0x1000071ce200000, quorum=127.0.0.1:55327, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T13:23:09,686 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45921-0x1000071ce200001, quorum=127.0.0.1:55327, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T13:23:09,702 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43507 is added to blk_1073741827_1003 (size=196) 2024-12-07T13:23:09,702 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41333 is added to blk_1073741827_1003 (size=196) 2024-12-07T13:23:09,716 INFO [master/c7c455b68129:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-07T13:23:09,718 INFO [master/c7c455b68129:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-07T13:23:09,724 INFO [master/c7c455b68129:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-07T13:23:09,753 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41333 is added to blk_1073741828_1004 (size=1189) 2024-12-07T13:23:09,754 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43507 is added to blk_1073741828_1004 (size=1189) 2024-12-07T13:23:10,170 INFO [master/c7c455b68129:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:33723/user/jenkins/test-data/3eb2eb4e-7df3-dd45-b018-332cf885bdba/MasterData/data/master/store 2024-12-07T13:23:10,188 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43507 is added to blk_1073741829_1005 (size=34) 2024-12-07T13:23:10,188 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41333 is added to blk_1073741829_1005 (size=34) 2024-12-07T13:23:10,192 INFO [master/c7c455b68129:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 2024-12-07T13:23:10,195 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T13:23:10,197 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-07T13:23:10,197 INFO [master/c7c455b68129:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-07T13:23:10,197 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-07T13:23:10,198 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-07T13:23:10,199 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-07T13:23:10,199 INFO [master/c7c455b68129:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
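The master:store descriptor created above declares four column families (info, proc, rs, state). For illustration, the 'info' family's attributes expressed with the public client builder API (a sketch only; the master constructs this descriptor internally, it is never created by user code):

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

// Sketch: an 'info' family resembling the attributes logged for master:store
// (VERSIONS=3, IN_MEMORY=true, ROW_INDEX_V1 encoding, ROWCOL bloom, 8 KB blocks).
public final class MasterStoreDescriptorSketch {
  public static TableDescriptor build() {
    ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("info"))
        .setMaxVersions(3)
        .setInMemory(true)
        .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
        .setBloomFilterType(BloomType.ROWCOL)
        .setBlocksize(8 * 1024)
        .build();
    return TableDescriptorBuilder
        .newBuilder(TableName.valueOf("master", "store"))
        .setColumnFamily(info)
        .build();
  }
}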
2024-12-07T13:23:10,200 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733577790196Disabling compacts and flushes for region at 1733577790196Disabling writes for close at 1733577790199 (+3 ms)Writing region close event to WAL at 1733577790199Closed at 1733577790199 2024-12-07T13:23:10,203 WARN [master/c7c455b68129:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:33723/user/jenkins/test-data/3eb2eb4e-7df3-dd45-b018-332cf885bdba/MasterData/data/master/store/.initializing 2024-12-07T13:23:10,203 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:33723/user/jenkins/test-data/3eb2eb4e-7df3-dd45-b018-332cf885bdba/MasterData/WALs/c7c455b68129,34945,1733577788070 2024-12-07T13:23:10,225 INFO [master/c7c455b68129:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=c7c455b68129%2C34945%2C1733577788070, suffix=, logDir=hdfs://localhost:33723/user/jenkins/test-data/3eb2eb4e-7df3-dd45-b018-332cf885bdba/MasterData/WALs/c7c455b68129,34945,1733577788070, archiveDir=hdfs://localhost:33723/user/jenkins/test-data/3eb2eb4e-7df3-dd45-b018-332cf885bdba/MasterData/oldWALs, maxLogs=10 2024-12-07T13:23:10,233 INFO [master/c7c455b68129:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor c7c455b68129%2C34945%2C1733577788070.1733577790229 2024-12-07T13:23:10,251 INFO [master/c7c455b68129:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/3eb2eb4e-7df3-dd45-b018-332cf885bdba/MasterData/WALs/c7c455b68129,34945,1733577788070/c7c455b68129%2C34945%2C1733577788070.1733577790229 2024-12-07T13:23:10,259 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37739:37739),(127.0.0.1/127.0.0.1:35967:35967)] 2024-12-07T13:23:10,260 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-07T13:23:10,260 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T13:23:10,263 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-07T13:23:10,264 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-07T13:23:10,297 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-07T13:23:10,321 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-07T13:23:10,324 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T13:23:10,326 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T13:23:10,327 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-07T13:23:10,329 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-07T13:23:10,330 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T13:23:10,331 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T13:23:10,331 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-07T13:23:10,334 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-07T13:23:10,334 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T13:23:10,335 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T13:23:10,335 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-07T13:23:10,337 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-07T13:23:10,337 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T13:23:10,338 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T13:23:10,339 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-07T13:23:10,343 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33723/user/jenkins/test-data/3eb2eb4e-7df3-dd45-b018-332cf885bdba/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-07T13:23:10,345 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33723/user/jenkins/test-data/3eb2eb4e-7df3-dd45-b018-332cf885bdba/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-07T13:23:10,351 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-07T13:23:10,351 DEBUG [master/c7c455b68129:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-07T13:23:10,354 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-07T13:23:10,358 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-07T13:23:10,362 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33723/user/jenkins/test-data/3eb2eb4e-7df3-dd45-b018-332cf885bdba/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-07T13:23:10,364 INFO [master/c7c455b68129:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=689481, jitterRate=-0.12328071892261505}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-07T13:23:10,372 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733577790276Initializing all the Stores at 1733577790278 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733577790279 (+1 ms)Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733577790279Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733577790280 (+1 ms)Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733577790280Cleaning up temporary data from old regions at 1733577790351 (+71 ms)Region opened successfully at 1733577790371 (+20 ms) 2024-12-07T13:23:10,373 INFO [master/c7c455b68129:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-07T13:23:10,403 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@319e001b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, 
connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=c7c455b68129/172.17.0.3:0 2024-12-07T13:23:10,430 INFO [master/c7c455b68129:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-12-07T13:23:10,440 INFO [master/c7c455b68129:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-07T13:23:10,440 INFO [master/c7c455b68129:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-07T13:23:10,443 INFO [master/c7c455b68129:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-07T13:23:10,444 INFO [master/c7c455b68129:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 1 msec 2024-12-07T13:23:10,449 INFO [master/c7c455b68129:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 4 msec 2024-12-07T13:23:10,449 INFO [master/c7c455b68129:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-07T13:23:10,470 INFO [master/c7c455b68129:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-07T13:23:10,478 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34945-0x1000071ce200000, quorum=127.0.0.1:55327, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-07T13:23:10,580 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-07T13:23:10,582 INFO [master/c7c455b68129:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-07T13:23:10,584 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34945-0x1000071ce200000, quorum=127.0.0.1:55327, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-07T13:23:10,591 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-07T13:23:10,593 INFO [master/c7c455b68129:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-07T13:23:10,596 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34945-0x1000071ce200000, quorum=127.0.0.1:55327, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-07T13:23:10,601 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-07T13:23:10,603 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34945-0x1000071ce200000, quorum=127.0.0.1:55327, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-07T13:23:10,612 
DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-07T13:23:10,630 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34945-0x1000071ce200000, quorum=127.0.0.1:55327, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-07T13:23:10,641 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-07T13:23:10,733 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34945-0x1000071ce200000, quorum=127.0.0.1:55327, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-07T13:23:10,733 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45921-0x1000071ce200001, quorum=127.0.0.1:55327, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-07T13:23:10,734 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45921-0x1000071ce200001, quorum=127.0.0.1:55327, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T13:23:10,734 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34945-0x1000071ce200000, quorum=127.0.0.1:55327, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T13:23:10,739 INFO [master/c7c455b68129:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=c7c455b68129,34945,1733577788070, sessionid=0x1000071ce200000, setting cluster-up flag (Was=false) 2024-12-07T13:23:10,843 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45921-0x1000071ce200001, quorum=127.0.0.1:55327, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T13:23:10,843 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34945-0x1000071ce200000, quorum=127.0.0.1:55327, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T13:23:10,875 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-07T13:23:10,877 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=c7c455b68129,34945,1733577788070 2024-12-07T13:23:10,896 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45921-0x1000071ce200001, quorum=127.0.0.1:55327, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T13:23:10,896 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34945-0x1000071ce200000, quorum=127.0.0.1:55327, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T13:23:10,928 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-07T13:23:10,930 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=c7c455b68129,34945,1733577788070 2024-12-07T13:23:10,936 INFO [master/c7c455b68129:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:33723/user/jenkins/test-data/3eb2eb4e-7df3-dd45-b018-332cf885bdba/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-07T13:23:10,957 INFO [RS:0;c7c455b68129:45921 {}] regionserver.HRegionServer(746): ClusterId : 09dc5aa9-426b-4bb7-8341-4eb9b2474df6 2024-12-07T13:23:10,959 DEBUG [RS:0;c7c455b68129:45921 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-07T13:23:10,972 DEBUG [RS:0;c7c455b68129:45921 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-07T13:23:10,972 DEBUG [RS:0;c7c455b68129:45921 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-07T13:23:10,981 DEBUG [RS:0;c7c455b68129:45921 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-07T13:23:10,982 DEBUG [RS:0;c7c455b68129:45921 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1544a906, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=c7c455b68129/172.17.0.3:0 2024-12-07T13:23:10,994 DEBUG [RS:0;c7c455b68129:45921 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;c7c455b68129:45921 2024-12-07T13:23:10,997 INFO [RS:0;c7c455b68129:45921 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-07T13:23:10,997 INFO [RS:0;c7c455b68129:45921 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-07T13:23:10,997 DEBUG [RS:0;c7c455b68129:45921 {}] regionserver.HRegionServer(832): About to register with Master. 2024-12-07T13:23:10,999 INFO [RS:0;c7c455b68129:45921 {}] regionserver.HRegionServer(2659): reportForDuty to master=c7c455b68129,34945,1733577788070 with port=45921, startcode=1733577789348 2024-12-07T13:23:11,003 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-07T13:23:11,009 DEBUG [RS:0;c7c455b68129:45921 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-07T13:23:11,012 INFO [master/c7c455b68129:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-07T13:23:11,019 INFO [master/c7c455b68129:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
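The recurring zookeeper.ZKWatcher entries in this log (type=NodeChildrenChanged, state=SyncConnected against baseZNode=/hbase on quorum=127.0.0.1:55327) can be observed with the plain ZooKeeper client as well. A minimal sketch, assuming a ZooKeeper ensemble is reachable at that address; the port is the mini-cluster's ephemeral test port and would differ elsewhere.

    import java.util.List;
    import java.util.concurrent.CountDownLatch;
    import org.apache.zookeeper.WatchedEvent;
    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooKeeper;

    public final class ZkChildrenWatchSketch {
      public static void main(String[] args) throws Exception {
        CountDownLatch connected = new CountDownLatch(1);
        // Quorum address and base znode are taken from the log lines above.
        ZooKeeper zk = new ZooKeeper("127.0.0.1:55327", 30_000, (WatchedEvent e) -> {
          if (e.getState() == Watcher.Event.KeeperState.SyncConnected) {
            connected.countDown();
          }
          // NodeChildrenChanged is the event type seen repeatedly in this log.
          if (e.getType() == Watcher.Event.EventType.NodeChildrenChanged) {
            System.out.println("children changed under " + e.getPath());
          }
        });
        connected.await();
        List<String> children = zk.getChildren("/hbase", true); // true re-arms the watch
        System.out.println("/hbase children: " + children);
        zk.close();
      }
    }

Standard ZooKeeper watches are one-shot and must be re-registered after firing, which is why the same NodeChildrenChanged event keeps reappearing in the log as HBase re-arms its watchers.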
2024-12-07T13:23:11,025 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: c7c455b68129,34945,1733577788070 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-07T13:23:11,031 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/c7c455b68129:0, corePoolSize=5, maxPoolSize=5 2024-12-07T13:23:11,032 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/c7c455b68129:0, corePoolSize=5, maxPoolSize=5 2024-12-07T13:23:11,032 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/c7c455b68129:0, corePoolSize=5, maxPoolSize=5 2024-12-07T13:23:11,032 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/c7c455b68129:0, corePoolSize=5, maxPoolSize=5 2024-12-07T13:23:11,032 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/c7c455b68129:0, corePoolSize=10, maxPoolSize=10 2024-12-07T13:23:11,032 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/c7c455b68129:0, corePoolSize=1, maxPoolSize=1 2024-12-07T13:23:11,032 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/c7c455b68129:0, corePoolSize=2, maxPoolSize=2 2024-12-07T13:23:11,033 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/c7c455b68129:0, corePoolSize=1, maxPoolSize=1 2024-12-07T13:23:11,035 INFO [master/c7c455b68129:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733577821034 2024-12-07T13:23:11,036 INFO [master/c7c455b68129:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-07T13:23:11,038 INFO [master/c7c455b68129:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-07T13:23:11,039 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-07T13:23:11,040 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-07T13:23:11,041 INFO [master/c7c455b68129:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-07T13:23:11,042 INFO [master/c7c455b68129:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-07T13:23:11,042 INFO [master/c7c455b68129:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize 
cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-07T13:23:11,042 INFO [master/c7c455b68129:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-07T13:23:11,046 INFO [master/c7c455b68129:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-07T13:23:11,052 INFO [master/c7c455b68129:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-07T13:23:11,053 INFO [master/c7c455b68129:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-07T13:23:11,054 INFO [master/c7c455b68129:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-07T13:23:11,054 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T13:23:11,055 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-07T13:23:11,056 INFO [master/c7c455b68129:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-07T13:23:11,057 INFO [master/c7c455b68129:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-07T13:23:11,059 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/c7c455b68129:0:becomeActiveMaster-HFileCleaner.large.0-1733577791058,5,FailOnTimeoutGroup] 2024-12-07T13:23:11,060 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small 
files=Thread[master/c7c455b68129:0:becomeActiveMaster-HFileCleaner.small.0-1733577791059,5,FailOnTimeoutGroup] 2024-12-07T13:23:11,060 INFO [master/c7c455b68129:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-07T13:23:11,061 INFO [master/c7c455b68129:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-07T13:23:11,062 INFO [master/c7c455b68129:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-07T13:23:11,063 INFO [master/c7c455b68129:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-12-07T13:23:11,066 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43507 is added to blk_1073741831_1007 (size=1321) 2024-12-07T13:23:11,067 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41333 is added to blk_1073741831_1007 (size=1321) 2024-12-07T13:23:11,068 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:33723/user/jenkins/test-data/3eb2eb4e-7df3-dd45-b018-332cf885bdba/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-07T13:23:11,069 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:33723/user/jenkins/test-data/3eb2eb4e-7df3-dd45-b018-332cf885bdba 2024-12-07T13:23:11,080 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43507 is added to blk_1073741832_1008 (size=32) 2024-12-07T13:23:11,080 INFO [Block report processor 
{}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41333 is added to blk_1073741832_1008 (size=32) 2024-12-07T13:23:11,082 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T13:23:11,082 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:53473, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-12-07T13:23:11,085 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-07T13:23:11,087 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-07T13:23:11,088 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T13:23:11,089 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34945 {}] master.ServerManager(363): Checking decommissioned status of RegionServer c7c455b68129,45921,1733577789348 2024-12-07T13:23:11,089 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T13:23:11,089 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-07T13:23:11,091 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34945 {}] master.ServerManager(517): Registering regionserver=c7c455b68129,45921,1733577789348 2024-12-07T13:23:11,091 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-07T13:23:11,091 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T13:23:11,092 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T13:23:11,093 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-07T13:23:11,095 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-07T13:23:11,096 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T13:23:11,097 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T13:23:11,097 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-07T13:23:11,100 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-07T13:23:11,100 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T13:23:11,101 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): 
Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T13:23:11,101 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-07T13:23:11,102 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33723/user/jenkins/test-data/3eb2eb4e-7df3-dd45-b018-332cf885bdba/data/hbase/meta/1588230740 2024-12-07T13:23:11,103 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33723/user/jenkins/test-data/3eb2eb4e-7df3-dd45-b018-332cf885bdba/data/hbase/meta/1588230740 2024-12-07T13:23:11,105 DEBUG [RS:0;c7c455b68129:45921 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:33723/user/jenkins/test-data/3eb2eb4e-7df3-dd45-b018-332cf885bdba 2024-12-07T13:23:11,105 DEBUG [RS:0;c7c455b68129:45921 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:33723 2024-12-07T13:23:11,106 DEBUG [RS:0;c7c455b68129:45921 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-07T13:23:11,107 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-07T13:23:11,107 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-07T13:23:11,108 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-07T13:23:11,112 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-07T13:23:11,116 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33723/user/jenkins/test-data/3eb2eb4e-7df3-dd45-b018-332cf885bdba/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-07T13:23:11,117 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34945-0x1000071ce200000, quorum=127.0.0.1:55327, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-07T13:23:11,118 DEBUG [RS:0;c7c455b68129:45921 {}] zookeeper.ZKUtil(111): regionserver:45921-0x1000071ce200001, quorum=127.0.0.1:55327, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/c7c455b68129,45921,1733577789348 2024-12-07T13:23:11,118 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=770986, jitterRate=-0.019640684127807617}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-07T13:23:11,118 WARN [RS:0;c7c455b68129:45921 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
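Several of the informational messages above name the site-configuration keys that control the behaviour they report: hbase.region.store.parallel.put.limit (StoreHotnessProtector), hbase.normalizer.merge.min_region_size.mb (region normalizer), and hbase.regions.recovery.store.file.ref.count (reopening regions with very high storeFileRefCount). A minimal sketch of setting them programmatically; the keys are quoted verbatim from the log, while the values are placeholders, not recommendations.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public final class TuningKeysSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Keys below are quoted from the surrounding log entries; values are illustrative only.
        conf.setInt("hbase.region.store.parallel.put.limit", 10);     // log: "Set ... > 0 to enable" StoreHotnessProtector
        conf.setLong("hbase.normalizer.merge.min_region_size.mb", 1); // log: normalizer updated this key from 0 to 1
        conf.setInt("hbase.regions.recovery.store.file.ref.count", 3); // log: "Provide threshold value > 0 ... to enable it"
        System.out.println(conf.get("hbase.region.store.parallel.put.limit"));
      }
    }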
2024-12-07T13:23:11,118 INFO [RS:0;c7c455b68129:45921 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-07T13:23:11,118 DEBUG [RS:0;c7c455b68129:45921 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:33723/user/jenkins/test-data/3eb2eb4e-7df3-dd45-b018-332cf885bdba/WALs/c7c455b68129,45921,1733577789348 2024-12-07T13:23:11,120 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [c7c455b68129,45921,1733577789348] 2024-12-07T13:23:11,121 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733577791082Initializing all the Stores at 1733577791084 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733577791084Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733577791085 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733577791085Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733577791085Cleaning up temporary data from old regions at 1733577791107 (+22 ms)Region opened successfully at 1733577791121 (+14 ms) 2024-12-07T13:23:11,121 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-07T13:23:11,121 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-07T13:23:11,121 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-07T13:23:11,121 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-07T13:23:11,122 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-07T13:23:11,123 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-07T13:23:11,123 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733577791121Disabling compacts and flushes for region at 1733577791121Disabling writes for close at 1733577791122 (+1 ms)Writing region close event to WAL at 1733577791122Closed at 1733577791123 (+1 ms) 2024-12-07T13:23:11,127 DEBUG [PEWorker-1 {}] 
procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-07T13:23:11,127 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-07T13:23:11,134 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-07T13:23:11,142 INFO [RS:0;c7c455b68129:45921 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-07T13:23:11,144 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-07T13:23:11,146 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-07T13:23:11,155 INFO [RS:0;c7c455b68129:45921 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-07T13:23:11,159 INFO [RS:0;c7c455b68129:45921 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-07T13:23:11,160 INFO [RS:0;c7c455b68129:45921 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-07T13:23:11,160 INFO [RS:0;c7c455b68129:45921 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-07T13:23:11,166 INFO [RS:0;c7c455b68129:45921 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-07T13:23:11,167 INFO [RS:0;c7c455b68129:45921 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
2024-12-07T13:23:11,168 DEBUG [RS:0;c7c455b68129:45921 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/c7c455b68129:0, corePoolSize=1, maxPoolSize=1 2024-12-07T13:23:11,168 DEBUG [RS:0;c7c455b68129:45921 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/c7c455b68129:0, corePoolSize=1, maxPoolSize=1 2024-12-07T13:23:11,168 DEBUG [RS:0;c7c455b68129:45921 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/c7c455b68129:0, corePoolSize=1, maxPoolSize=1 2024-12-07T13:23:11,168 DEBUG [RS:0;c7c455b68129:45921 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/c7c455b68129:0, corePoolSize=1, maxPoolSize=1 2024-12-07T13:23:11,168 DEBUG [RS:0;c7c455b68129:45921 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/c7c455b68129:0, corePoolSize=1, maxPoolSize=1 2024-12-07T13:23:11,168 DEBUG [RS:0;c7c455b68129:45921 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/c7c455b68129:0, corePoolSize=2, maxPoolSize=2 2024-12-07T13:23:11,168 DEBUG [RS:0;c7c455b68129:45921 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/c7c455b68129:0, corePoolSize=1, maxPoolSize=1 2024-12-07T13:23:11,168 DEBUG [RS:0;c7c455b68129:45921 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/c7c455b68129:0, corePoolSize=1, maxPoolSize=1 2024-12-07T13:23:11,169 DEBUG [RS:0;c7c455b68129:45921 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/c7c455b68129:0, corePoolSize=1, maxPoolSize=1 2024-12-07T13:23:11,169 DEBUG [RS:0;c7c455b68129:45921 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/c7c455b68129:0, corePoolSize=1, maxPoolSize=1 2024-12-07T13:23:11,169 DEBUG [RS:0;c7c455b68129:45921 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/c7c455b68129:0, corePoolSize=1, maxPoolSize=1 2024-12-07T13:23:11,169 DEBUG [RS:0;c7c455b68129:45921 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/c7c455b68129:0, corePoolSize=1, maxPoolSize=1 2024-12-07T13:23:11,169 DEBUG [RS:0;c7c455b68129:45921 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/c7c455b68129:0, corePoolSize=3, maxPoolSize=3 2024-12-07T13:23:11,169 DEBUG [RS:0;c7c455b68129:45921 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/c7c455b68129:0, corePoolSize=3, maxPoolSize=3 2024-12-07T13:23:11,170 INFO [RS:0;c7c455b68129:45921 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-07T13:23:11,170 INFO [RS:0;c7c455b68129:45921 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-07T13:23:11,170 INFO [RS:0;c7c455b68129:45921 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-07T13:23:11,171 INFO [RS:0;c7c455b68129:45921 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 
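The executor.ExecutorService lines above report each pool's corePoolSize and maxPoolSize. In plain java.util.concurrent terms (a sketch of the concept only, not HBase's own ExecutorService wrapper), a pool reported as corePoolSize=1, maxPoolSize=1, such as RS_OPEN_REGION, behaves like a single dedicated worker thread:

    import java.util.concurrent.LinkedBlockingQueue;
    import java.util.concurrent.ThreadPoolExecutor;
    import java.util.concurrent.TimeUnit;

    public final class PoolSizeSketch {
      public static void main(String[] args) {
        // corePoolSize=1, maxPoolSize=1: one worker thread, tasks queue up behind it.
        ThreadPoolExecutor openRegionPool =
            new ThreadPoolExecutor(1, 1, 60, TimeUnit.SECONDS, new LinkedBlockingQueue<>());
        openRegionPool.submit(() -> System.out.println("open region task"));
        openRegionPool.shutdown();
      }
    }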
2024-12-07T13:23:11,171 INFO [RS:0;c7c455b68129:45921 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-07T13:23:11,171 INFO [RS:0;c7c455b68129:45921 {}] hbase.ChoreService(168): Chore ScheduledChore name=c7c455b68129,45921,1733577789348-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-07T13:23:11,187 INFO [RS:0;c7c455b68129:45921 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-07T13:23:11,188 INFO [RS:0;c7c455b68129:45921 {}] hbase.ChoreService(168): Chore ScheduledChore name=c7c455b68129,45921,1733577789348-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-07T13:23:11,189 INFO [RS:0;c7c455b68129:45921 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-07T13:23:11,189 INFO [RS:0;c7c455b68129:45921 {}] regionserver.Replication(171): c7c455b68129,45921,1733577789348 started 2024-12-07T13:23:11,205 INFO [RS:0;c7c455b68129:45921 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-07T13:23:11,206 INFO [RS:0;c7c455b68129:45921 {}] regionserver.HRegionServer(1482): Serving as c7c455b68129,45921,1733577789348, RpcServer on c7c455b68129/172.17.0.3:45921, sessionid=0x1000071ce200001 2024-12-07T13:23:11,206 DEBUG [RS:0;c7c455b68129:45921 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-07T13:23:11,206 DEBUG [RS:0;c7c455b68129:45921 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager c7c455b68129,45921,1733577789348 2024-12-07T13:23:11,207 DEBUG [RS:0;c7c455b68129:45921 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'c7c455b68129,45921,1733577789348' 2024-12-07T13:23:11,207 DEBUG [RS:0;c7c455b68129:45921 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-07T13:23:11,208 DEBUG [RS:0;c7c455b68129:45921 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-07T13:23:11,208 DEBUG [RS:0;c7c455b68129:45921 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-07T13:23:11,208 DEBUG [RS:0;c7c455b68129:45921 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-07T13:23:11,209 DEBUG [RS:0;c7c455b68129:45921 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager c7c455b68129,45921,1733577789348 2024-12-07T13:23:11,209 DEBUG [RS:0;c7c455b68129:45921 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'c7c455b68129,45921,1733577789348' 2024-12-07T13:23:11,209 DEBUG [RS:0;c7c455b68129:45921 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-07T13:23:11,209 DEBUG [RS:0;c7c455b68129:45921 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-07T13:23:11,210 DEBUG [RS:0;c7c455b68129:45921 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-07T13:23:11,210 INFO [RS:0;c7c455b68129:45921 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-07T13:23:11,210 INFO [RS:0;c7c455b68129:45921 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support 
disabled, not starting space quota manager. 2024-12-07T13:23:11,297 WARN [c7c455b68129:34945 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-12-07T13:23:11,317 INFO [RS:0;c7c455b68129:45921 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=c7c455b68129%2C45921%2C1733577789348, suffix=, logDir=hdfs://localhost:33723/user/jenkins/test-data/3eb2eb4e-7df3-dd45-b018-332cf885bdba/WALs/c7c455b68129,45921,1733577789348, archiveDir=hdfs://localhost:33723/user/jenkins/test-data/3eb2eb4e-7df3-dd45-b018-332cf885bdba/oldWALs, maxLogs=32 2024-12-07T13:23:11,320 INFO [RS:0;c7c455b68129:45921 {}] monitor.StreamSlowMonitor(122): New stream slow monitor c7c455b68129%2C45921%2C1733577789348.1733577791320 2024-12-07T13:23:11,329 INFO [RS:0;c7c455b68129:45921 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/3eb2eb4e-7df3-dd45-b018-332cf885bdba/WALs/c7c455b68129,45921,1733577789348/c7c455b68129%2C45921%2C1733577789348.1733577791320 2024-12-07T13:23:11,331 DEBUG [RS:0;c7c455b68129:45921 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35967:35967),(127.0.0.1/127.0.0.1:37739:37739)] 2024-12-07T13:23:11,550 DEBUG [c7c455b68129:34945 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-12-07T13:23:11,560 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=c7c455b68129,45921,1733577789348 2024-12-07T13:23:11,567 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as c7c455b68129,45921,1733577789348, state=OPENING 2024-12-07T13:23:11,633 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-07T13:23:11,643 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45921-0x1000071ce200001, quorum=127.0.0.1:55327, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T13:23:11,643 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34945-0x1000071ce200000, quorum=127.0.0.1:55327, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T13:23:11,644 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-07T13:23:11,644 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-07T13:23:11,646 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-07T13:23:11,648 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=c7c455b68129,45921,1733577789348}] 2024-12-07T13:23:11,822 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-07T13:23:11,825 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:41001, 
version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-07T13:23:11,835 INFO [RS_OPEN_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-07T13:23:11,836 INFO [RS_OPEN_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-07T13:23:11,839 INFO [RS_OPEN_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=c7c455b68129%2C45921%2C1733577789348.meta, suffix=.meta, logDir=hdfs://localhost:33723/user/jenkins/test-data/3eb2eb4e-7df3-dd45-b018-332cf885bdba/WALs/c7c455b68129,45921,1733577789348, archiveDir=hdfs://localhost:33723/user/jenkins/test-data/3eb2eb4e-7df3-dd45-b018-332cf885bdba/oldWALs, maxLogs=32 2024-12-07T13:23:11,841 INFO [RS_OPEN_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor c7c455b68129%2C45921%2C1733577789348.meta.1733577791841.meta 2024-12-07T13:23:11,848 INFO [RS_OPEN_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/3eb2eb4e-7df3-dd45-b018-332cf885bdba/WALs/c7c455b68129,45921,1733577789348/c7c455b68129%2C45921%2C1733577789348.meta.1733577791841.meta 2024-12-07T13:23:11,849 DEBUG [RS_OPEN_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35967:35967),(127.0.0.1/127.0.0.1:37739:37739)] 2024-12-07T13:23:11,851 DEBUG [RS_OPEN_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-07T13:23:11,853 DEBUG [RS_OPEN_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-07T13:23:11,855 DEBUG [RS_OPEN_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-07T13:23:11,860 INFO [RS_OPEN_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
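
The wal.WALFactory line above shows the meta region being given an FSHLog-backed WAL (FSHLogProvider). A minimal sketch of the configuration that selects this provider, assuming the standard hbase.wal.provider key; the test's own configuration is not shown in this log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class WalProviderSketch {
    public static void main(String[] args) {
        // Hypothetical: pick the FSHLog-based provider seen in the log above.
        // "filesystem" is the documented value that maps to FSHLogProvider.
        Configuration conf = HBaseConfiguration.create();
        conf.set("hbase.wal.provider", "filesystem");
    }
}
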
2024-12-07T13:23:11,864 DEBUG [RS_OPEN_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-07T13:23:11,865 DEBUG [RS_OPEN_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T13:23:11,865 DEBUG [RS_OPEN_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-07T13:23:11,865 DEBUG [RS_OPEN_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-07T13:23:11,868 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-07T13:23:11,870 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-07T13:23:11,870 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T13:23:11,871 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T13:23:11,871 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-07T13:23:11,872 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-07T13:23:11,872 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T13:23:11,873 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T13:23:11,873 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-07T13:23:11,875 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-07T13:23:11,875 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T13:23:11,875 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T13:23:11,876 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-07T13:23:11,877 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-07T13:23:11,877 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T13:23:11,878 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-12-07T13:23:11,878 DEBUG [RS_OPEN_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-07T13:23:11,880 DEBUG [RS_OPEN_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33723/user/jenkins/test-data/3eb2eb4e-7df3-dd45-b018-332cf885bdba/data/hbase/meta/1588230740 2024-12-07T13:23:11,882 DEBUG [RS_OPEN_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33723/user/jenkins/test-data/3eb2eb4e-7df3-dd45-b018-332cf885bdba/data/hbase/meta/1588230740 2024-12-07T13:23:11,885 DEBUG [RS_OPEN_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-07T13:23:11,885 DEBUG [RS_OPEN_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-07T13:23:11,886 DEBUG [RS_OPEN_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-07T13:23:11,888 DEBUG [RS_OPEN_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-07T13:23:11,890 INFO [RS_OPEN_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=804838, jitterRate=0.02340547740459442}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-07T13:23:11,890 DEBUG [RS_OPEN_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-07T13:23:11,891 DEBUG [RS_OPEN_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733577791866Writing region info on filesystem at 1733577791866Initializing all the Stores at 1733577791868 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733577791868Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733577791868Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 
'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733577791868Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733577791868Cleaning up temporary data from old regions at 1733577791885 (+17 ms)Running coprocessor post-open hooks at 1733577791890 (+5 ms)Region opened successfully at 1733577791891 (+1 ms) 2024-12-07T13:23:11,897 INFO [RS_OPEN_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733577791815 2024-12-07T13:23:11,907 DEBUG [RS_OPEN_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-07T13:23:11,908 INFO [RS_OPEN_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-07T13:23:11,909 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=c7c455b68129,45921,1733577789348 2024-12-07T13:23:11,912 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as c7c455b68129,45921,1733577789348, state=OPEN 2024-12-07T13:23:11,957 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34945-0x1000071ce200000, quorum=127.0.0.1:55327, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-07T13:23:11,957 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45921-0x1000071ce200001, quorum=127.0.0.1:55327, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-07T13:23:11,957 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-07T13:23:11,957 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-07T13:23:11,957 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=c7c455b68129,45921,1733577789348 2024-12-07T13:23:11,963 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-07T13:23:11,963 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=c7c455b68129,45921,1733577789348 in 310 msec 2024-12-07T13:23:11,970 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-07T13:23:11,970 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 830 msec 2024-12-07T13:23:11,972 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute 
pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-07T13:23:11,972 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-07T13:23:11,988 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-07T13:23:11,990 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=c7c455b68129,45921,1733577789348, seqNum=-1] 2024-12-07T13:23:12,007 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T13:23:12,009 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:43779, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T13:23:12,029 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 1.0630 sec 2024-12-07T13:23:12,030 INFO [master/c7c455b68129:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733577792029, completionTime=-1 2024-12-07T13:23:12,033 INFO [master/c7c455b68129:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-12-07T13:23:12,033 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-12-07T13:23:12,063 INFO [master/c7c455b68129:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-12-07T13:23:12,063 INFO [master/c7c455b68129:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733577852063 2024-12-07T13:23:12,063 INFO [master/c7c455b68129:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733577912063 2024-12-07T13:23:12,063 INFO [master/c7c455b68129:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 30 msec 2024-12-07T13:23:12,067 INFO [master/c7c455b68129:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c7c455b68129,34945,1733577788070-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-07T13:23:12,068 INFO [master/c7c455b68129:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c7c455b68129,34945,1733577788070-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-07T13:23:12,068 INFO [master/c7c455b68129:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c7c455b68129,34945,1733577788070-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-07T13:23:12,069 INFO [master/c7c455b68129:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-c7c455b68129:34945, period=300000, unit=MILLISECONDS is enabled. 
2024-12-07T13:23:12,070 INFO [master/c7c455b68129:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-07T13:23:12,070 INFO [master/c7c455b68129:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-12-07T13:23:12,076 DEBUG [master/c7c455b68129:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-07T13:23:12,097 INFO [master/c7c455b68129:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 2.579sec 2024-12-07T13:23:12,098 INFO [master/c7c455b68129:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-07T13:23:12,099 INFO [master/c7c455b68129:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-07T13:23:12,100 INFO [master/c7c455b68129:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-07T13:23:12,101 INFO [master/c7c455b68129:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-12-07T13:23:12,101 INFO [master/c7c455b68129:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-07T13:23:12,102 INFO [master/c7c455b68129:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c7c455b68129,34945,1733577788070-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-07T13:23:12,102 INFO [master/c7c455b68129:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c7c455b68129,34945,1733577788070-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-07T13:23:12,110 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-07T13:23:12,111 INFO [master/c7c455b68129:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-07T13:23:12,111 INFO [master/c7c455b68129:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c7c455b68129,34945,1733577788070-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-12-07T13:23:12,169 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@66cb686a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T13:23:12,172 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-12-07T13:23:12,172 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-12-07T13:23:12,176 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request c7c455b68129,34945,-1 for getting cluster id 2024-12-07T13:23:12,180 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-07T13:23:12,187 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '09dc5aa9-426b-4bb7-8341-4eb9b2474df6' 2024-12-07T13:23:12,190 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-07T13:23:12,190 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "09dc5aa9-426b-4bb7-8341-4eb9b2474df6" 2024-12-07T13:23:12,192 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7422899d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T13:23:12,192 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [c7c455b68129,34945,-1] 2024-12-07T13:23:12,194 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-07T13:23:12,196 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T13:23:12,198 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:57166, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-07T13:23:12,201 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@32fb6022, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T13:23:12,201 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-07T13:23:12,208 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=c7c455b68129,45921,1733577789348, seqNum=-1] 2024-12-07T13:23:12,209 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T13:23:12,211 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:53476, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T13:23:12,230 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): 
Minicluster is up; activeMaster=c7c455b68129,34945,1733577788070 2024-12-07T13:23:12,230 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T13:23:12,236 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-12-07T13:23:12,240 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-12-07T13:23:12,244 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.AsyncConnectionImpl(321): The fetched master address is c7c455b68129,34945,1733577788070 2024-12-07T13:23:12,246 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@53459839 2024-12-07T13:23:12,247 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-07T13:23:12,249 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:57182, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-07T13:23:12,251 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34945 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-12-07T13:23:12,251 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34945 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
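
The two TableDescriptorChecker warnings above are caused by deliberately tiny test sizes. A minimal sketch of setting those values on a configuration, using only the keys and numbers reported in the warnings; how the test actually sets them is not visible in this log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class SmallRegionSizesSketch {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Values copied from the warnings above; tiny on purpose so the test
        // can trigger region splits and memstore flushes quickly.
        conf.setLong("hbase.hregion.max.filesize", 786432L);      // ~768 KB
        conf.setLong("hbase.hregion.memstore.flush.size", 8192L); // 8 KB
    }
}
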
2024-12-07T13:23:12,269 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34945 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.3 create 'TestLogRolling-testSlowSyncLogRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-07T13:23:12,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34945 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling 2024-12-07T13:23:12,281 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-12-07T13:23:12,284 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34945 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.3 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testSlowSyncLogRolling" procId is: 4 2024-12-07T13:23:12,284 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T13:23:12,287 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-07T13:23:12,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34945 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-07T13:23:13,061 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41333 is added to blk_1073741835_1011 (size=389) 2024-12-07T13:23:13,071 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43507 is added to blk_1073741835_1011 (size=389) 2024-12-07T13:23:13,074 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 9990b6f765d25fa8a30e2342f021d04a, NAME => 'TestLogRolling-testSlowSyncLogRolling,,1733577792250.9990b6f765d25fa8a30e2342f021d04a.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testSlowSyncLogRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:33723/user/jenkins/test-data/3eb2eb4e-7df3-dd45-b018-332cf885bdba 2024-12-07T13:23:13,082 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43507 is added to blk_1073741836_1012 (size=72) 2024-12-07T13:23:13,083 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41333 is added to blk_1073741836_1012 (size=72) 2024-12-07T13:23:13,084 DEBUG 
[RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testSlowSyncLogRolling,,1733577792250.9990b6f765d25fa8a30e2342f021d04a.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T13:23:13,084 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1722): Closing 9990b6f765d25fa8a30e2342f021d04a, disabling compactions & flushes 2024-12-07T13:23:13,084 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testSlowSyncLogRolling,,1733577792250.9990b6f765d25fa8a30e2342f021d04a. 2024-12-07T13:23:13,084 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testSlowSyncLogRolling,,1733577792250.9990b6f765d25fa8a30e2342f021d04a. 2024-12-07T13:23:13,084 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testSlowSyncLogRolling,,1733577792250.9990b6f765d25fa8a30e2342f021d04a. after waiting 0 ms 2024-12-07T13:23:13,084 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testSlowSyncLogRolling,,1733577792250.9990b6f765d25fa8a30e2342f021d04a. 2024-12-07T13:23:13,084 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testSlowSyncLogRolling,,1733577792250.9990b6f765d25fa8a30e2342f021d04a. 2024-12-07T13:23:13,084 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for 9990b6f765d25fa8a30e2342f021d04a: Waiting for close lock at 1733577793084Disabling compacts and flushes for region at 1733577793084Disabling writes for close at 1733577793084Writing region close event to WAL at 1733577793084Closed at 1733577793084 2024-12-07T13:23:13,087 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_ADD_TO_META 2024-12-07T13:23:13,091 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testSlowSyncLogRolling,,1733577792250.9990b6f765d25fa8a30e2342f021d04a.","families":{"info":[{"qualifier":"regioninfo","vlen":71,"tag":[],"timestamp":"1733577793087"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733577793087"}]},"ts":"1733577793087"} 2024-12-07T13:23:13,095 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
2024-12-07T13:23:13,097 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-07T13:23:13,099 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testSlowSyncLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733577793097"}]},"ts":"1733577793097"} 2024-12-07T13:23:13,104 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testSlowSyncLogRolling, state=ENABLING in hbase:meta 2024-12-07T13:23:13,106 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=9990b6f765d25fa8a30e2342f021d04a, ASSIGN}] 2024-12-07T13:23:13,108 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=9990b6f765d25fa8a30e2342f021d04a, ASSIGN 2024-12-07T13:23:13,110 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=9990b6f765d25fa8a30e2342f021d04a, ASSIGN; state=OFFLINE, location=c7c455b68129,45921,1733577789348; forceNewPlan=false, retain=false 2024-12-07T13:23:13,262 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=9990b6f765d25fa8a30e2342f021d04a, regionState=OPENING, regionLocation=c7c455b68129,45921,1733577789348 2024-12-07T13:23:13,267 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=9990b6f765d25fa8a30e2342f021d04a, ASSIGN because future has completed 2024-12-07T13:23:13,269 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 9990b6f765d25fa8a30e2342f021d04a, server=c7c455b68129,45921,1733577789348}] 2024-12-07T13:23:13,439 INFO [RS_OPEN_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testSlowSyncLogRolling,,1733577792250.9990b6f765d25fa8a30e2342f021d04a. 
2024-12-07T13:23:13,439 DEBUG [RS_OPEN_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 9990b6f765d25fa8a30e2342f021d04a, NAME => 'TestLogRolling-testSlowSyncLogRolling,,1733577792250.9990b6f765d25fa8a30e2342f021d04a.', STARTKEY => '', ENDKEY => ''} 2024-12-07T13:23:13,440 DEBUG [RS_OPEN_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testSlowSyncLogRolling 9990b6f765d25fa8a30e2342f021d04a 2024-12-07T13:23:13,440 DEBUG [RS_OPEN_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testSlowSyncLogRolling,,1733577792250.9990b6f765d25fa8a30e2342f021d04a.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T13:23:13,440 DEBUG [RS_OPEN_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 9990b6f765d25fa8a30e2342f021d04a 2024-12-07T13:23:13,440 DEBUG [RS_OPEN_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 9990b6f765d25fa8a30e2342f021d04a 2024-12-07T13:23:13,442 INFO [StoreOpener-9990b6f765d25fa8a30e2342f021d04a-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 9990b6f765d25fa8a30e2342f021d04a 2024-12-07T13:23:13,445 INFO [StoreOpener-9990b6f765d25fa8a30e2342f021d04a-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 9990b6f765d25fa8a30e2342f021d04a columnFamilyName info 2024-12-07T13:23:13,445 DEBUG [StoreOpener-9990b6f765d25fa8a30e2342f021d04a-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T13:23:13,446 INFO [StoreOpener-9990b6f765d25fa8a30e2342f021d04a-1 {}] regionserver.HStore(327): Store=9990b6f765d25fa8a30e2342f021d04a/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T13:23:13,446 DEBUG [RS_OPEN_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 9990b6f765d25fa8a30e2342f021d04a 2024-12-07T13:23:13,448 DEBUG [RS_OPEN_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:33723/user/jenkins/test-data/3eb2eb4e-7df3-dd45-b018-332cf885bdba/data/default/TestLogRolling-testSlowSyncLogRolling/9990b6f765d25fa8a30e2342f021d04a 2024-12-07T13:23:13,449 DEBUG [RS_OPEN_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33723/user/jenkins/test-data/3eb2eb4e-7df3-dd45-b018-332cf885bdba/data/default/TestLogRolling-testSlowSyncLogRolling/9990b6f765d25fa8a30e2342f021d04a 2024-12-07T13:23:13,450 DEBUG [RS_OPEN_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 9990b6f765d25fa8a30e2342f021d04a 2024-12-07T13:23:13,450 DEBUG [RS_OPEN_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 9990b6f765d25fa8a30e2342f021d04a 2024-12-07T13:23:13,452 DEBUG [RS_OPEN_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 9990b6f765d25fa8a30e2342f021d04a 2024-12-07T13:23:13,456 DEBUG [RS_OPEN_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33723/user/jenkins/test-data/3eb2eb4e-7df3-dd45-b018-332cf885bdba/data/default/TestLogRolling-testSlowSyncLogRolling/9990b6f765d25fa8a30e2342f021d04a/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-07T13:23:13,457 INFO [RS_OPEN_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 9990b6f765d25fa8a30e2342f021d04a; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=852282, jitterRate=0.08373303711414337}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-07T13:23:13,457 DEBUG [RS_OPEN_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 9990b6f765d25fa8a30e2342f021d04a 2024-12-07T13:23:13,458 DEBUG [RS_OPEN_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 9990b6f765d25fa8a30e2342f021d04a: Running coprocessor pre-open hook at 1733577793440Writing region info on filesystem at 1733577793440Initializing all the Stores at 1733577793442 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733577793442Cleaning up temporary data from old regions at 1733577793450 (+8 ms)Running coprocessor post-open hooks at 1733577793457 (+7 ms)Region opened successfully at 1733577793458 (+1 ms) 2024-12-07T13:23:13,460 INFO [RS_OPEN_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testSlowSyncLogRolling,,1733577792250.9990b6f765d25fa8a30e2342f021d04a., pid=6, masterSystemTime=1733577793426 2024-12-07T13:23:13,463 DEBUG [RS_OPEN_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for 
TestLogRolling-testSlowSyncLogRolling,,1733577792250.9990b6f765d25fa8a30e2342f021d04a. 2024-12-07T13:23:13,464 INFO [RS_OPEN_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testSlowSyncLogRolling,,1733577792250.9990b6f765d25fa8a30e2342f021d04a. 2024-12-07T13:23:13,464 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=9990b6f765d25fa8a30e2342f021d04a, regionState=OPEN, openSeqNum=2, regionLocation=c7c455b68129,45921,1733577789348 2024-12-07T13:23:13,468 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 9990b6f765d25fa8a30e2342f021d04a, server=c7c455b68129,45921,1733577789348 because future has completed 2024-12-07T13:23:13,475 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-12-07T13:23:13,475 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 9990b6f765d25fa8a30e2342f021d04a, server=c7c455b68129,45921,1733577789348 in 203 msec 2024-12-07T13:23:13,479 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-12-07T13:23:13,479 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=9990b6f765d25fa8a30e2342f021d04a, ASSIGN in 369 msec 2024-12-07T13:23:13,481 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-07T13:23:13,481 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testSlowSyncLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733577793481"}]},"ts":"1733577793481"} 2024-12-07T13:23:13,485 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testSlowSyncLogRolling, state=ENABLED in hbase:meta 2024-12-07T13:23:13,487 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_POST_OPERATION 2024-12-07T13:23:13,490 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling in 1.2140 sec 2024-12-07T13:23:17,377 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-12-07T13:23:17,428 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-12-07T13:23:17,430 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testSlowSyncLogRolling' 2024-12-07T13:23:18,427 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-12-07T13:23:18,428 INFO [HBase-Metrics2-1 {}] 
impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-12-07T13:23:18,430 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling 2024-12-07T13:23:18,430 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling Metrics about Tables on a single HBase RegionServer 2024-12-07T13:23:18,431 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-07T13:23:18,431 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-12-07T13:23:18,431 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-12-07T13:23:18,431 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-12-07T13:23:22,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34945 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-07T13:23:22,307 INFO [RPCClient-NioEventLoopGroup-4-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testSlowSyncLogRolling completed 2024-12-07T13:23:22,310 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testSlowSyncLogRolling,, stopping at row=TestLogRolling-testSlowSyncLogRolling ,, for max=2147483647 with caching=100 2024-12-07T13:23:22,318 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testSlowSyncLogRolling 2024-12-07T13:23:22,318 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testSlowSyncLogRolling,,1733577792250.9990b6f765d25fa8a30e2342f021d04a. 
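
The create-table flow above (pid=4 through its ASSIGN subprocedures) builds 'TestLogRolling-testSlowSyncLogRolling' with a single 'info' family and VERSIONS => '1'. A hedged sketch of the equivalent client-side Admin call follows; this is not the test's actual code.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateTestTableSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            // One column family 'info' with a single version, matching the
            // descriptor printed by HMaster in the log above.
            TableDescriptorBuilder table = TableDescriptorBuilder
                .newBuilder(TableName.valueOf("TestLogRolling-testSlowSyncLogRolling"))
                .setColumnFamily(ColumnFamilyDescriptorBuilder
                    .newBuilder(Bytes.toBytes("info"))
                    .setMaxVersions(1)
                    .build());
            admin.createTable(table.build());
        }
    }
}
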
2024-12-07T13:23:22,319 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor c7c455b68129%2C45921%2C1733577789348.1733577802319 2024-12-07T13:23:22,328 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:23:22,328 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:23:22,328 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:23:22,328 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:23:22,328 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:23:22,329 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/3eb2eb4e-7df3-dd45-b018-332cf885bdba/WALs/c7c455b68129,45921,1733577789348/c7c455b68129%2C45921%2C1733577789348.1733577791320 with entries=1, filesize=443 B; new WAL /user/jenkins/test-data/3eb2eb4e-7df3-dd45-b018-332cf885bdba/WALs/c7c455b68129,45921,1733577789348/c7c455b68129%2C45921%2C1733577789348.1733577802319 2024-12-07T13:23:22,331 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35967:35967),(127.0.0.1/127.0.0.1:37739:37739)] 2024-12-07T13:23:22,331 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:33723/user/jenkins/test-data/3eb2eb4e-7df3-dd45-b018-332cf885bdba/WALs/c7c455b68129,45921,1733577789348/c7c455b68129%2C45921%2C1733577789348.1733577791320 is not closed yet, will try archiving it next time 2024-12-07T13:23:22,332 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43507 is added to blk_1073741833_1009 (size=451) 2024-12-07T13:23:22,332 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41333 is added to blk_1073741833_1009 (size=451) 2024-12-07T13:23:22,337 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:33723/user/jenkins/test-data/3eb2eb4e-7df3-dd45-b018-332cf885bdba/WALs/c7c455b68129,45921,1733577789348/c7c455b68129%2C45921%2C1733577789348.1733577791320 to hdfs://localhost:33723/user/jenkins/test-data/3eb2eb4e-7df3-dd45-b018-332cf885bdba/oldWALs/c7c455b68129%2C45921%2C1733577789348.1733577791320 2024-12-07T13:23:22,339 DEBUG [RPCClient-NioEventLoopGroup-4-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testSlowSyncLogRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testSlowSyncLogRolling,,1733577792250.9990b6f765d25fa8a30e2342f021d04a., hostname=c7c455b68129,45921,1733577789348, seqNum=2] 2024-12-07T13:23:34,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45921 {}] regionserver.HRegion(8855): Flush requested on 9990b6f765d25fa8a30e2342f021d04a 2024-12-07T13:23:34,448 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 9990b6f765d25fa8a30e2342f021d04a 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-07T13:23:34,497 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33723/user/jenkins/test-data/3eb2eb4e-7df3-dd45-b018-332cf885bdba/data/default/TestLogRolling-testSlowSyncLogRolling/9990b6f765d25fa8a30e2342f021d04a/.tmp/info/e6c15068c4e74ad9af3feeb0531024a9 is 1080, key is row0001/info:/1733577802343/Put/seqid=0 2024-12-07T13:23:34,507 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41333 is added to blk_1073741838_1014 (size=12509) 2024-12-07T13:23:34,508 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43507 is added to blk_1073741838_1014 (size=12509) 2024-12-07T13:23:34,509 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:33723/user/jenkins/test-data/3eb2eb4e-7df3-dd45-b018-332cf885bdba/data/default/TestLogRolling-testSlowSyncLogRolling/9990b6f765d25fa8a30e2342f021d04a/.tmp/info/e6c15068c4e74ad9af3feeb0531024a9 2024-12-07T13:23:34,549 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33723/user/jenkins/test-data/3eb2eb4e-7df3-dd45-b018-332cf885bdba/data/default/TestLogRolling-testSlowSyncLogRolling/9990b6f765d25fa8a30e2342f021d04a/.tmp/info/e6c15068c4e74ad9af3feeb0531024a9 as hdfs://localhost:33723/user/jenkins/test-data/3eb2eb4e-7df3-dd45-b018-332cf885bdba/data/default/TestLogRolling-testSlowSyncLogRolling/9990b6f765d25fa8a30e2342f021d04a/info/e6c15068c4e74ad9af3feeb0531024a9 2024-12-07T13:23:34,559 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33723/user/jenkins/test-data/3eb2eb4e-7df3-dd45-b018-332cf885bdba/data/default/TestLogRolling-testSlowSyncLogRolling/9990b6f765d25fa8a30e2342f021d04a/info/e6c15068c4e74ad9af3feeb0531024a9, entries=7, sequenceid=11, filesize=12.2 K 2024-12-07T13:23:34,567 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 9990b6f765d25fa8a30e2342f021d04a in 120ms, sequenceid=11, compaction requested=false 2024-12-07T13:23:34,567 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 9990b6f765d25fa8a30e2342f021d04a: 2024-12-07T13:23:37,180 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
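The flush a few entries above writes roughly 7.36 KB of memstore data (seven cells of about 1080 bytes each, row0001 onward) into a 12.2 K HFile. A minimal sketch of the kind of client writes that would fill such a memstore, assuming a plain Table handle and with the qualifier name and helper class invented for illustration, is:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class WriteRows {
  // Writes `count` rows of roughly 1 KB each into the 'info' family, e.g.
  // writeBatch(conn, 1, 7) for row0001..row0007 (~7.36 KB of memstore data).
  static void writeBatch(Connection conn, int first, int count) throws Exception {
    byte[] value = new byte[1024]; // ~1 KB payload, close to the 1080-byte cells reported above
    try (Table table = conn.getTable(
        TableName.valueOf("TestLogRolling-testSlowSyncLogRolling"))) {
      for (int i = first; i < first + count; i++) {
        Put put = new Put(Bytes.toBytes(String.format("row%04d", i)));
        put.addColumn(Bytes.toBytes("info"), Bytes.toBytes("q"), value);
        table.put(put);
      }
    }
  }
}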
2024-12-07T13:23:42,464 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor c7c455b68129%2C45921%2C1733577789348.1733577822463 2024-12-07T13:23:42,679 INFO [Time-limited test {}] wal.AbstractFSWAL(1368): Slow sync cost: 210 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41333,DS-0f51dbbd-c381-41fa-93a3-d43428451116,DISK], DatanodeInfoWithStorage[127.0.0.1:43507,DS-7c972b3a-16d7-447b-88aa-e9490bb58535,DISK]] 2024-12-07T13:23:42,679 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:23:42,680 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:23:42,680 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:23:42,680 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:23:42,680 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:23:42,681 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/3eb2eb4e-7df3-dd45-b018-332cf885bdba/WALs/c7c455b68129,45921,1733577789348/c7c455b68129%2C45921%2C1733577789348.1733577802319 with entries=12, filesize=12.10 KB; new WAL /user/jenkins/test-data/3eb2eb4e-7df3-dd45-b018-332cf885bdba/WALs/c7c455b68129,45921,1733577789348/c7c455b68129%2C45921%2C1733577789348.1733577822463 2024-12-07T13:23:42,682 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35967:35967),(127.0.0.1/127.0.0.1:37739:37739)] 2024-12-07T13:23:42,682 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:33723/user/jenkins/test-data/3eb2eb4e-7df3-dd45-b018-332cf885bdba/WALs/c7c455b68129,45921,1733577789348/c7c455b68129%2C45921%2C1733577789348.1733577802319 is not closed yet, will try archiving it next time 2024-12-07T13:23:42,683 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43507 is added to blk_1073741837_1013 (size=12399) 2024-12-07T13:23:42,684 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41333 is added to blk_1073741837_1013 (size=12399) 2024-12-07T13:23:42,887 INFO [FSHLog-0-hdfs://localhost:33723/user/jenkins/test-data/3eb2eb4e-7df3-dd45-b018-332cf885bdba-prefix:c7c455b68129,45921,1733577789348 {}] wal.AbstractFSWAL(1368): Slow sync cost: 202 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41333,DS-0f51dbbd-c381-41fa-93a3-d43428451116,DISK], DatanodeInfoWithStorage[127.0.0.1:43507,DS-7c972b3a-16d7-447b-88aa-e9490bb58535,DISK]] 2024-12-07T13:23:45,096 INFO [FSHLog-0-hdfs://localhost:33723/user/jenkins/test-data/3eb2eb4e-7df3-dd45-b018-332cf885bdba-prefix:c7c455b68129,45921,1733577789348 {}] wal.AbstractFSWAL(1368): Slow sync cost: 202 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41333,DS-0f51dbbd-c381-41fa-93a3-d43428451116,DISK], DatanodeInfoWithStorage[127.0.0.1:43507,DS-7c972b3a-16d7-447b-88aa-e9490bb58535,DISK]] 2024-12-07T13:23:47,304 INFO [FSHLog-0-hdfs://localhost:33723/user/jenkins/test-data/3eb2eb4e-7df3-dd45-b018-332cf885bdba-prefix:c7c455b68129,45921,1733577789348 {}] wal.AbstractFSWAL(1368): Slow sync cost: 202 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41333,DS-0f51dbbd-c381-41fa-93a3-d43428451116,DISK], DatanodeInfoWithStorage[127.0.0.1:43507,DS-7c972b3a-16d7-447b-88aa-e9490bb58535,DISK]] 2024-12-07T13:23:49,567 INFO [FSHLog-0-hdfs://localhost:33723/user/jenkins/test-data/3eb2eb4e-7df3-dd45-b018-332cf885bdba-prefix:c7c455b68129,45921,1733577789348 {}] wal.AbstractFSWAL(1368): Slow 
sync cost: 256 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41333,DS-0f51dbbd-c381-41fa-93a3-d43428451116,DISK], DatanodeInfoWithStorage[127.0.0.1:43507,DS-7c972b3a-16d7-447b-88aa-e9490bb58535,DISK]] 2024-12-07T13:23:49,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45921 {}] regionserver.HRegion(8855): Flush requested on 9990b6f765d25fa8a30e2342f021d04a 2024-12-07T13:23:49,568 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 9990b6f765d25fa8a30e2342f021d04a 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-07T13:23:49,770 INFO [FSHLog-0-hdfs://localhost:33723/user/jenkins/test-data/3eb2eb4e-7df3-dd45-b018-332cf885bdba-prefix:c7c455b68129,45921,1733577789348 {}] wal.AbstractFSWAL(1368): Slow sync cost: 200 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41333,DS-0f51dbbd-c381-41fa-93a3-d43428451116,DISK], DatanodeInfoWithStorage[127.0.0.1:43507,DS-7c972b3a-16d7-447b-88aa-e9490bb58535,DISK]] 2024-12-07T13:23:49,774 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33723/user/jenkins/test-data/3eb2eb4e-7df3-dd45-b018-332cf885bdba/data/default/TestLogRolling-testSlowSyncLogRolling/9990b6f765d25fa8a30e2342f021d04a/.tmp/info/5716612b5de54473ac672e144dca7adb is 1080, key is row0008/info:/1733577816447/Put/seqid=0 2024-12-07T13:23:49,780 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41333 is added to blk_1073741840_1016 (size=12509) 2024-12-07T13:23:49,780 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43507 is added to blk_1073741840_1016 (size=12509) 2024-12-07T13:23:49,782 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=21 (bloomFilter=true), to=hdfs://localhost:33723/user/jenkins/test-data/3eb2eb4e-7df3-dd45-b018-332cf885bdba/data/default/TestLogRolling-testSlowSyncLogRolling/9990b6f765d25fa8a30e2342f021d04a/.tmp/info/5716612b5de54473ac672e144dca7adb 2024-12-07T13:23:49,791 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33723/user/jenkins/test-data/3eb2eb4e-7df3-dd45-b018-332cf885bdba/data/default/TestLogRolling-testSlowSyncLogRolling/9990b6f765d25fa8a30e2342f021d04a/.tmp/info/5716612b5de54473ac672e144dca7adb as hdfs://localhost:33723/user/jenkins/test-data/3eb2eb4e-7df3-dd45-b018-332cf885bdba/data/default/TestLogRolling-testSlowSyncLogRolling/9990b6f765d25fa8a30e2342f021d04a/info/5716612b5de54473ac672e144dca7adb 2024-12-07T13:23:49,802 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33723/user/jenkins/test-data/3eb2eb4e-7df3-dd45-b018-332cf885bdba/data/default/TestLogRolling-testSlowSyncLogRolling/9990b6f765d25fa8a30e2342f021d04a/info/5716612b5de54473ac672e144dca7adb, entries=7, sequenceid=21, filesize=12.2 K 2024-12-07T13:23:50,006 INFO [FSHLog-0-hdfs://localhost:33723/user/jenkins/test-data/3eb2eb4e-7df3-dd45-b018-332cf885bdba-prefix:c7c455b68129,45921,1733577789348 {}] wal.AbstractFSWAL(1368): Slow sync cost: 202 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41333,DS-0f51dbbd-c381-41fa-93a3-d43428451116,DISK], DatanodeInfoWithStorage[127.0.0.1:43507,DS-7c972b3a-16d7-447b-88aa-e9490bb58535,DISK]] 2024-12-07T13:23:50,006 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 9990b6f765d25fa8a30e2342f021d04a in 
439ms, sequenceid=21, compaction requested=false 2024-12-07T13:23:50,007 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 9990b6f765d25fa8a30e2342f021d04a: 2024-12-07T13:23:50,007 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=24.4 K, sizeToCheck=16.0 K 2024-12-07T13:23:50,007 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-07T13:23:50,009 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:33723/user/jenkins/test-data/3eb2eb4e-7df3-dd45-b018-332cf885bdba/data/default/TestLogRolling-testSlowSyncLogRolling/9990b6f765d25fa8a30e2342f021d04a/info/e6c15068c4e74ad9af3feeb0531024a9 because midkey is the same as first or last row 2024-12-07T13:23:51,775 INFO [FSHLog-0-hdfs://localhost:33723/user/jenkins/test-data/3eb2eb4e-7df3-dd45-b018-332cf885bdba-prefix:c7c455b68129,45921,1733577789348 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41333,DS-0f51dbbd-c381-41fa-93a3-d43428451116,DISK], DatanodeInfoWithStorage[127.0.0.1:43507,DS-7c972b3a-16d7-447b-88aa-e9490bb58535,DISK]] 2024-12-07T13:23:52,126 INFO [master/c7c455b68129:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-12-07T13:23:52,127 INFO [master/c7c455b68129:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-12-07T13:23:53,982 INFO [FSHLog-0-hdfs://localhost:33723/user/jenkins/test-data/3eb2eb4e-7df3-dd45-b018-332cf885bdba-prefix:c7c455b68129,45921,1733577789348 {}] wal.AbstractFSWAL(1368): Slow sync cost: 202 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41333,DS-0f51dbbd-c381-41fa-93a3-d43428451116,DISK], DatanodeInfoWithStorage[127.0.0.1:43507,DS-7c972b3a-16d7-447b-88aa-e9490bb58535,DISK]] 2024-12-07T13:23:53,987 WARN [FSHLog-0-hdfs://localhost:33723/user/jenkins/test-data/3eb2eb4e-7df3-dd45-b018-332cf885bdba-prefix:c7c455b68129,45921,1733577789348 {}] wal.AbstractFSWAL(2201): Requesting log roll because we exceeded slow sync threshold; count=8, threshold=5, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41333,DS-0f51dbbd-c381-41fa-93a3-d43428451116,DISK], DatanodeInfoWithStorage[127.0.0.1:43507,DS-7c972b3a-16d7-447b-88aa-e9490bb58535,DISK]] 2024-12-07T13:23:53,987 DEBUG [regionserver/c7c455b68129:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog c7c455b68129%2C45921%2C1733577789348:(num 1733577822463) roll requested 2024-12-07T13:23:53,988 INFO [regionserver/c7c455b68129:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor c7c455b68129%2C45921%2C1733577789348.1733577833988 2024-12-07T13:23:54,201 INFO [regionserver/c7c455b68129:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 211 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41333,DS-0f51dbbd-c381-41fa-93a3-d43428451116,DISK], DatanodeInfoWithStorage[127.0.0.1:43507,DS-7c972b3a-16d7-447b-88aa-e9490bb58535,DISK]] 2024-12-07T13:23:54,202 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:23:54,202 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:23:54,203 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:23:54,203 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:23:54,203 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 
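The WARN just above requests a roll because eight syncs crossed the slow-sync threshold against a count limit of five; later entries (from 13:24:09 on) request rolls when a single sync exceeds 5000 ms. The following is a simplified model of that two-part decision, not the actual AbstractFSWAL code: the constants mirror the values visible in this log, the 100 ms slow-sync floor and the field names are assumptions, and the time-windowed reset the real class applies is omitted.

class SlowSyncRollPolicy {
  static final long SLOW_SYNC_MS = 100;          // assumed floor for logging "Slow sync cost"
  static final long ROLL_ON_SYNC_MS = 5000;      // one sync this slow forces a roll ("threshold=5000 ms")
  static final int SLOW_SYNC_ROLL_COUNT = 5;     // or this many slow syncs accumulate ("threshold=5")

  private int slowSyncCount;

  /** Returns true when a log roll should be requested for the WAL. */
  boolean onSyncCompleted(long syncCostMs) {
    if (syncCostMs > ROLL_ON_SYNC_MS) {
      return true;                               // e.g. "time=5014 ms, threshold=5000 ms"
    }
    if (syncCostMs > SLOW_SYNC_MS) {
      slowSyncCount++;                           // e.g. the ~200 ms syncs logged above
    }
    if (slowSyncCount >= SLOW_SYNC_ROLL_COUNT) { // e.g. "count=8, threshold=5"
      slowSyncCount = 0;                         // start counting again once a roll is requested
      return true;
    }
    return false;
  }
}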
2024-12-07T13:23:54,203 INFO [regionserver/c7c455b68129:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/3eb2eb4e-7df3-dd45-b018-332cf885bdba/WALs/c7c455b68129,45921,1733577789348/c7c455b68129%2C45921%2C1733577789348.1733577822463 with entries=8, filesize=7.55 KB; new WAL /user/jenkins/test-data/3eb2eb4e-7df3-dd45-b018-332cf885bdba/WALs/c7c455b68129,45921,1733577789348/c7c455b68129%2C45921%2C1733577789348.1733577833988 2024-12-07T13:23:54,205 DEBUG [regionserver/c7c455b68129:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35967:35967),(127.0.0.1/127.0.0.1:37739:37739)] 2024-12-07T13:23:54,205 DEBUG [regionserver/c7c455b68129:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:33723/user/jenkins/test-data/3eb2eb4e-7df3-dd45-b018-332cf885bdba/WALs/c7c455b68129,45921,1733577789348/c7c455b68129%2C45921%2C1733577789348.1733577822463 is not closed yet, will try archiving it next time 2024-12-07T13:23:54,205 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:33723/user/jenkins/test-data/3eb2eb4e-7df3-dd45-b018-332cf885bdba/WALs/c7c455b68129,45921,1733577789348/c7c455b68129%2C45921%2C1733577789348.1733577802319 to hdfs://localhost:33723/user/jenkins/test-data/3eb2eb4e-7df3-dd45-b018-332cf885bdba/oldWALs/c7c455b68129%2C45921%2C1733577789348.1733577802319 2024-12-07T13:23:54,207 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43507 is added to blk_1073741839_1015 (size=7739) 2024-12-07T13:23:54,209 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41333 is added to blk_1073741839_1015 (size=7739) 2024-12-07T13:23:56,191 INFO [FSHLog-0-hdfs://localhost:33723/user/jenkins/test-data/3eb2eb4e-7df3-dd45-b018-332cf885bdba-prefix:c7c455b68129,45921,1733577789348 {}] wal.AbstractFSWAL(1368): Slow sync cost: 202 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41333,DS-0f51dbbd-c381-41fa-93a3-d43428451116,DISK], DatanodeInfoWithStorage[127.0.0.1:43507,DS-7c972b3a-16d7-447b-88aa-e9490bb58535,DISK]] 2024-12-07T13:23:58,396 INFO [FSHLog-0-hdfs://localhost:33723/user/jenkins/test-data/3eb2eb4e-7df3-dd45-b018-332cf885bdba-prefix:c7c455b68129,45921,1733577789348 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41333,DS-0f51dbbd-c381-41fa-93a3-d43428451116,DISK], DatanodeInfoWithStorage[127.0.0.1:43507,DS-7c972b3a-16d7-447b-88aa-e9490bb58535,DISK]] 2024-12-07T13:23:58,440 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 9990b6f765d25fa8a30e2342f021d04a, had cached 0 bytes from a total of 25018 2024-12-07T13:24:00,603 INFO [FSHLog-0-hdfs://localhost:33723/user/jenkins/test-data/3eb2eb4e-7df3-dd45-b018-332cf885bdba-prefix:c7c455b68129,45921,1733577789348 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41333,DS-0f51dbbd-c381-41fa-93a3-d43428451116,DISK], DatanodeInfoWithStorage[127.0.0.1:43507,DS-7c972b3a-16d7-447b-88aa-e9490bb58535,DISK]] 2024-12-07T13:24:02,809 INFO [FSHLog-0-hdfs://localhost:33723/user/jenkins/test-data/3eb2eb4e-7df3-dd45-b018-332cf885bdba-prefix:c7c455b68129,45921,1733577789348 {}] wal.AbstractFSWAL(1368): Slow sync cost: 202 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41333,DS-0f51dbbd-c381-41fa-93a3-d43428451116,DISK], 
DatanodeInfoWithStorage[127.0.0.1:43507,DS-7c972b3a-16d7-447b-88aa-e9490bb58535,DISK]] 2024-12-07T13:24:04,814 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-12-07T13:24:04,815 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor c7c455b68129%2C45921%2C1733577789348.1733577844815 2024-12-07T13:24:07,181 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-07T13:24:09,837 INFO [Time-limited test {}] wal.AbstractFSWAL(1368): Slow sync cost: 5014 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41333,DS-0f51dbbd-c381-41fa-93a3-d43428451116,DISK], DatanodeInfoWithStorage[127.0.0.1:43507,DS-7c972b3a-16d7-447b-88aa-e9490bb58535,DISK]] 2024-12-07T13:24:09,841 WARN [Time-limited test {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5014 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41333,DS-0f51dbbd-c381-41fa-93a3-d43428451116,DISK], DatanodeInfoWithStorage[127.0.0.1:43507,DS-7c972b3a-16d7-447b-88aa-e9490bb58535,DISK]] 2024-12-07T13:24:09,841 DEBUG [regionserver/c7c455b68129:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog c7c455b68129%2C45921%2C1733577789348:(num 1733577844815) roll requested 2024-12-07T13:24:09,841 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:24:09,841 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:24:09,842 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:24:09,842 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:24:09,842 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:24:09,842 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/3eb2eb4e-7df3-dd45-b018-332cf885bdba/WALs/c7c455b68129,45921,1733577789348/c7c455b68129%2C45921%2C1733577789348.1733577833988 with entries=4, filesize=4.63 KB; new WAL /user/jenkins/test-data/3eb2eb4e-7df3-dd45-b018-332cf885bdba/WALs/c7c455b68129,45921,1733577789348/c7c455b68129%2C45921%2C1733577789348.1733577844815 2024-12-07T13:24:09,843 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35967:35967),(127.0.0.1/127.0.0.1:37739:37739)] 2024-12-07T13:24:09,843 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:33723/user/jenkins/test-data/3eb2eb4e-7df3-dd45-b018-332cf885bdba/WALs/c7c455b68129,45921,1733577789348/c7c455b68129%2C45921%2C1733577789348.1733577833988 is not closed yet, will try archiving it next time 2024-12-07T13:24:09,844 INFO [regionserver/c7c455b68129:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor c7c455b68129%2C45921%2C1733577789348.1733577849844 2024-12-07T13:24:09,845 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43507 is added to blk_1073741841_1017 (size=4753) 2024-12-07T13:24:09,845 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41333 is added to blk_1073741841_1017 (size=4753) 2024-12-07T13:24:14,847 INFO [FSHLog-0-hdfs://localhost:33723/user/jenkins/test-data/3eb2eb4e-7df3-dd45-b018-332cf885bdba-prefix:c7c455b68129,45921,1733577789348 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5001 ms, current pipeline: 
[DatanodeInfoWithStorage[127.0.0.1:41333,DS-0f51dbbd-c381-41fa-93a3-d43428451116,DISK], DatanodeInfoWithStorage[127.0.0.1:43507,DS-7c972b3a-16d7-447b-88aa-e9490bb58535,DISK]] 2024-12-07T13:24:14,848 WARN [FSHLog-0-hdfs://localhost:33723/user/jenkins/test-data/3eb2eb4e-7df3-dd45-b018-332cf885bdba-prefix:c7c455b68129,45921,1733577789348 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5001 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41333,DS-0f51dbbd-c381-41fa-93a3-d43428451116,DISK], DatanodeInfoWithStorage[127.0.0.1:43507,DS-7c972b3a-16d7-447b-88aa-e9490bb58535,DISK]] 2024-12-07T13:24:14,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45921 {}] regionserver.HRegion(8855): Flush requested on 9990b6f765d25fa8a30e2342f021d04a 2024-12-07T13:24:14,848 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 9990b6f765d25fa8a30e2342f021d04a 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-07T13:24:14,853 INFO [regionserver/c7c455b68129:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 5005 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41333,DS-0f51dbbd-c381-41fa-93a3-d43428451116,DISK], DatanodeInfoWithStorage[127.0.0.1:43507,DS-7c972b3a-16d7-447b-88aa-e9490bb58535,DISK]] 2024-12-07T13:24:14,853 WARN [regionserver/c7c455b68129:0.logRoller {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5005 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41333,DS-0f51dbbd-c381-41fa-93a3-d43428451116,DISK], DatanodeInfoWithStorage[127.0.0.1:43507,DS-7c972b3a-16d7-447b-88aa-e9490bb58535,DISK]] 2024-12-07T13:24:16,849 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-12-07T13:24:19,852 INFO [FSHLog-0-hdfs://localhost:33723/user/jenkins/test-data/3eb2eb4e-7df3-dd45-b018-332cf885bdba-prefix:c7c455b68129,45921,1733577789348 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5002 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41333,DS-0f51dbbd-c381-41fa-93a3-d43428451116,DISK], DatanodeInfoWithStorage[127.0.0.1:43507,DS-7c972b3a-16d7-447b-88aa-e9490bb58535,DISK]] 2024-12-07T13:24:19,853 WARN [FSHLog-0-hdfs://localhost:33723/user/jenkins/test-data/3eb2eb4e-7df3-dd45-b018-332cf885bdba-prefix:c7c455b68129,45921,1733577789348 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5002 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41333,DS-0f51dbbd-c381-41fa-93a3-d43428451116,DISK], DatanodeInfoWithStorage[127.0.0.1:43507,DS-7c972b3a-16d7-447b-88aa-e9490bb58535,DISK]] 2024-12-07T13:24:19,853 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:24:19,853 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:24:19,854 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:24:19,854 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:24:19,855 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:24:19,855 INFO [regionserver/c7c455b68129:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/3eb2eb4e-7df3-dd45-b018-332cf885bdba/WALs/c7c455b68129,45921,1733577789348/c7c455b68129%2C45921%2C1733577789348.1733577844815 with entries=2, filesize=1.52 KB; new WAL 
/user/jenkins/test-data/3eb2eb4e-7df3-dd45-b018-332cf885bdba/WALs/c7c455b68129,45921,1733577789348/c7c455b68129%2C45921%2C1733577789348.1733577849844 2024-12-07T13:24:19,858 DEBUG [regionserver/c7c455b68129:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35967:35967),(127.0.0.1/127.0.0.1:37739:37739)] 2024-12-07T13:24:19,859 DEBUG [regionserver/c7c455b68129:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:33723/user/jenkins/test-data/3eb2eb4e-7df3-dd45-b018-332cf885bdba/WALs/c7c455b68129,45921,1733577789348/c7c455b68129%2C45921%2C1733577789348.1733577844815 is not closed yet, will try archiving it next time 2024-12-07T13:24:19,859 DEBUG [regionserver/c7c455b68129:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog c7c455b68129%2C45921%2C1733577789348:(num 1733577849844) roll requested 2024-12-07T13:24:19,860 INFO [regionserver/c7c455b68129:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor c7c455b68129%2C45921%2C1733577789348.1733577859860 2024-12-07T13:24:19,861 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43507 is added to blk_1073741842_1018 (size=1569) 2024-12-07T13:24:19,861 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41333 is added to blk_1073741842_1018 (size=1569) 2024-12-07T13:24:19,863 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33723/user/jenkins/test-data/3eb2eb4e-7df3-dd45-b018-332cf885bdba/data/default/TestLogRolling-testSlowSyncLogRolling/9990b6f765d25fa8a30e2342f021d04a/.tmp/info/ff9594b9e4c74828ab196247ccb4b9cd is 1080, key is row0015/info:/1733577831573/Put/seqid=0 2024-12-07T13:24:19,868 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41333 is added to blk_1073741844_1020 (size=12509) 2024-12-07T13:24:19,869 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43507 is added to blk_1073741844_1020 (size=12509) 2024-12-07T13:24:19,870 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=31 (bloomFilter=true), to=hdfs://localhost:33723/user/jenkins/test-data/3eb2eb4e-7df3-dd45-b018-332cf885bdba/data/default/TestLogRolling-testSlowSyncLogRolling/9990b6f765d25fa8a30e2342f021d04a/.tmp/info/ff9594b9e4c74828ab196247ccb4b9cd 2024-12-07T13:24:19,880 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33723/user/jenkins/test-data/3eb2eb4e-7df3-dd45-b018-332cf885bdba/data/default/TestLogRolling-testSlowSyncLogRolling/9990b6f765d25fa8a30e2342f021d04a/.tmp/info/ff9594b9e4c74828ab196247ccb4b9cd as hdfs://localhost:33723/user/jenkins/test-data/3eb2eb4e-7df3-dd45-b018-332cf885bdba/data/default/TestLogRolling-testSlowSyncLogRolling/9990b6f765d25fa8a30e2342f021d04a/info/ff9594b9e4c74828ab196247ccb4b9cd 2024-12-07T13:24:19,889 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33723/user/jenkins/test-data/3eb2eb4e-7df3-dd45-b018-332cf885bdba/data/default/TestLogRolling-testSlowSyncLogRolling/9990b6f765d25fa8a30e2342f021d04a/info/ff9594b9e4c74828ab196247ccb4b9cd, entries=7, sequenceid=31, filesize=12.2 K 2024-12-07T13:24:24,877 INFO [regionserver/c7c455b68129:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 5011 ms, current pipeline: 
[DatanodeInfoWithStorage[127.0.0.1:41333,DS-0f51dbbd-c381-41fa-93a3-d43428451116,DISK], DatanodeInfoWithStorage[127.0.0.1:43507,DS-7c972b3a-16d7-447b-88aa-e9490bb58535,DISK]] 2024-12-07T13:24:24,877 WARN [regionserver/c7c455b68129:0.logRoller {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5011 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41333,DS-0f51dbbd-c381-41fa-93a3-d43428451116,DISK], DatanodeInfoWithStorage[127.0.0.1:43507,DS-7c972b3a-16d7-447b-88aa-e9490bb58535,DISK]] 2024-12-07T13:24:24,891 INFO [FSHLog-0-hdfs://localhost:33723/user/jenkins/test-data/3eb2eb4e-7df3-dd45-b018-332cf885bdba-prefix:c7c455b68129,45921,1733577789348 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5001 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41333,DS-0f51dbbd-c381-41fa-93a3-d43428451116,DISK], DatanodeInfoWithStorage[127.0.0.1:43507,DS-7c972b3a-16d7-447b-88aa-e9490bb58535,DISK]] 2024-12-07T13:24:24,891 WARN [FSHLog-0-hdfs://localhost:33723/user/jenkins/test-data/3eb2eb4e-7df3-dd45-b018-332cf885bdba-prefix:c7c455b68129,45921,1733577789348 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5001 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41333,DS-0f51dbbd-c381-41fa-93a3-d43428451116,DISK], DatanodeInfoWithStorage[127.0.0.1:43507,DS-7c972b3a-16d7-447b-88aa-e9490bb58535,DISK]] 2024-12-07T13:24:24,892 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 9990b6f765d25fa8a30e2342f021d04a in 10043ms, sequenceid=31, compaction requested=true 2024-12-07T13:24:24,892 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:24:24,892 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 9990b6f765d25fa8a30e2342f021d04a: 2024-12-07T13:24:24,892 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:24:24,892 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:24:24,892 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=36.6 K, sizeToCheck=16.0 K 2024-12-07T13:24:24,892 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-07T13:24:24,892 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:24:24,892 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:33723/user/jenkins/test-data/3eb2eb4e-7df3-dd45-b018-332cf885bdba/data/default/TestLogRolling-testSlowSyncLogRolling/9990b6f765d25fa8a30e2342f021d04a/info/e6c15068c4e74ad9af3feeb0531024a9 because midkey is the same as first or last row 2024-12-07T13:24:24,893 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:24:24,893 INFO [regionserver/c7c455b68129:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/3eb2eb4e-7df3-dd45-b018-332cf885bdba/WALs/c7c455b68129,45921,1733577789348/c7c455b68129%2C45921%2C1733577789348.1733577849844 with entries=1, filesize=430 B; new WAL /user/jenkins/test-data/3eb2eb4e-7df3-dd45-b018-332cf885bdba/WALs/c7c455b68129,45921,1733577789348/c7c455b68129%2C45921%2C1733577789348.1733577859860 2024-12-07T13:24:24,895 DEBUG [regionserver/c7c455b68129:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: 
[(127.0.0.1/127.0.0.1:37739:37739),(127.0.0.1/127.0.0.1:35967:35967)] 2024-12-07T13:24:24,895 DEBUG [regionserver/c7c455b68129:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:33723/user/jenkins/test-data/3eb2eb4e-7df3-dd45-b018-332cf885bdba/WALs/c7c455b68129,45921,1733577789348/c7c455b68129%2C45921%2C1733577789348.1733577849844 is not closed yet, will try archiving it next time 2024-12-07T13:24:24,895 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:33723/user/jenkins/test-data/3eb2eb4e-7df3-dd45-b018-332cf885bdba/WALs/c7c455b68129,45921,1733577789348/c7c455b68129%2C45921%2C1733577789348.1733577822463 to hdfs://localhost:33723/user/jenkins/test-data/3eb2eb4e-7df3-dd45-b018-332cf885bdba/oldWALs/c7c455b68129%2C45921%2C1733577789348.1733577822463 2024-12-07T13:24:24,895 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9990b6f765d25fa8a30e2342f021d04a:info, priority=-2147483648, current under compaction store size is 1 2024-12-07T13:24:24,895 DEBUG [regionserver/c7c455b68129:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog c7c455b68129%2C45921%2C1733577789348:(num 1733577859860) roll requested 2024-12-07T13:24:24,896 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor c7c455b68129%2C45921%2C1733577789348.1733577864895 2024-12-07T13:24:24,897 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43507 is added to blk_1073741843_1019 (size=438) 2024-12-07T13:24:24,897 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41333 is added to blk_1073741843_1019 (size=438) 2024-12-07T13:24:24,899 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:33723/user/jenkins/test-data/3eb2eb4e-7df3-dd45-b018-332cf885bdba/WALs/c7c455b68129,45921,1733577789348/c7c455b68129%2C45921%2C1733577789348.1733577833988 to hdfs://localhost:33723/user/jenkins/test-data/3eb2eb4e-7df3-dd45-b018-332cf885bdba/oldWALs/c7c455b68129%2C45921%2C1733577789348.1733577833988 2024-12-07T13:24:24,899 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T13:24:24,899 DEBUG [RS:0;c7c455b68129:45921-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-07T13:24:24,901 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:33723/user/jenkins/test-data/3eb2eb4e-7df3-dd45-b018-332cf885bdba/WALs/c7c455b68129,45921,1733577789348/c7c455b68129%2C45921%2C1733577789348.1733577844815 to hdfs://localhost:33723/user/jenkins/test-data/3eb2eb4e-7df3-dd45-b018-332cf885bdba/oldWALs/c7c455b68129%2C45921%2C1733577789348.1733577844815 2024-12-07T13:24:24,903 DEBUG [RS:0;c7c455b68129:45921-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37527 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-07T13:24:24,903 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:33723/user/jenkins/test-data/3eb2eb4e-7df3-dd45-b018-332cf885bdba/WALs/c7c455b68129,45921,1733577789348/c7c455b68129%2C45921%2C1733577789348.1733577849844 to 
hdfs://localhost:33723/user/jenkins/test-data/3eb2eb4e-7df3-dd45-b018-332cf885bdba/oldWALs/c7c455b68129%2C45921%2C1733577789348.1733577849844 2024-12-07T13:24:24,905 DEBUG [RS:0;c7c455b68129:45921-shortCompactions-0 {}] regionserver.HStore(1541): 9990b6f765d25fa8a30e2342f021d04a/info is initiating minor compaction (all files) 2024-12-07T13:24:24,906 INFO [RS:0;c7c455b68129:45921-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 9990b6f765d25fa8a30e2342f021d04a/info in TestLogRolling-testSlowSyncLogRolling,,1733577792250.9990b6f765d25fa8a30e2342f021d04a. 2024-12-07T13:24:24,906 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:24:24,906 INFO [RS:0;c7c455b68129:45921-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33723/user/jenkins/test-data/3eb2eb4e-7df3-dd45-b018-332cf885bdba/data/default/TestLogRolling-testSlowSyncLogRolling/9990b6f765d25fa8a30e2342f021d04a/info/e6c15068c4e74ad9af3feeb0531024a9, hdfs://localhost:33723/user/jenkins/test-data/3eb2eb4e-7df3-dd45-b018-332cf885bdba/data/default/TestLogRolling-testSlowSyncLogRolling/9990b6f765d25fa8a30e2342f021d04a/info/5716612b5de54473ac672e144dca7adb, hdfs://localhost:33723/user/jenkins/test-data/3eb2eb4e-7df3-dd45-b018-332cf885bdba/data/default/TestLogRolling-testSlowSyncLogRolling/9990b6f765d25fa8a30e2342f021d04a/info/ff9594b9e4c74828ab196247ccb4b9cd] into tmpdir=hdfs://localhost:33723/user/jenkins/test-data/3eb2eb4e-7df3-dd45-b018-332cf885bdba/data/default/TestLogRolling-testSlowSyncLogRolling/9990b6f765d25fa8a30e2342f021d04a/.tmp, totalSize=36.6 K 2024-12-07T13:24:24,906 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:24:24,906 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:24:24,906 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:24:24,906 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:24:24,907 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/3eb2eb4e-7df3-dd45-b018-332cf885bdba/WALs/c7c455b68129,45921,1733577789348/c7c455b68129%2C45921%2C1733577789348.1733577859860 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/3eb2eb4e-7df3-dd45-b018-332cf885bdba/WALs/c7c455b68129,45921,1733577789348/c7c455b68129%2C45921%2C1733577789348.1733577864895 2024-12-07T13:24:24,907 DEBUG [RS:0;c7c455b68129:45921-shortCompactions-0 {}] compactions.Compactor(225): Compacting e6c15068c4e74ad9af3feeb0531024a9, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1733577802343 2024-12-07T13:24:24,907 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37739:37739),(127.0.0.1/127.0.0.1:35967:35967)] 2024-12-07T13:24:24,908 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:33723/user/jenkins/test-data/3eb2eb4e-7df3-dd45-b018-332cf885bdba/WALs/c7c455b68129,45921,1733577789348/c7c455b68129%2C45921%2C1733577789348.1733577859860 is not closed yet, will try archiving it next time 2024-12-07T13:24:24,908 INFO [regionserver/c7c455b68129:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor c7c455b68129%2C45921%2C1733577789348.1733577864908 2024-12-07T13:24:24,908 DEBUG [RS:0;c7c455b68129:45921-shortCompactions-0 {}] compactions.Compactor(225): Compacting 5716612b5de54473ac672e144dca7adb, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=21, 
earliestPutTs=1733577816447 2024-12-07T13:24:24,909 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41333 is added to blk_1073741845_1021 (size=93) 2024-12-07T13:24:24,909 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43507 is added to blk_1073741845_1021 (size=93) 2024-12-07T13:24:24,910 DEBUG [RS:0;c7c455b68129:45921-shortCompactions-0 {}] compactions.Compactor(225): Compacting ff9594b9e4c74828ab196247ccb4b9cd, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=31, earliestPutTs=1733577831573 2024-12-07T13:24:24,919 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:24:24,919 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:24:24,919 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:24:24,919 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:24:24,919 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:24:24,919 INFO [regionserver/c7c455b68129:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/3eb2eb4e-7df3-dd45-b018-332cf885bdba/WALs/c7c455b68129,45921,1733577789348/c7c455b68129%2C45921%2C1733577789348.1733577864895 with entries=1, filesize=1.22 KB; new WAL /user/jenkins/test-data/3eb2eb4e-7df3-dd45-b018-332cf885bdba/WALs/c7c455b68129,45921,1733577789348/c7c455b68129%2C45921%2C1733577789348.1733577864908 2024-12-07T13:24:24,922 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41333 is added to blk_1073741846_1022 (size=1258) 2024-12-07T13:24:24,922 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43507 is added to blk_1073741846_1022 (size=1258) 2024-12-07T13:24:24,922 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:33723/user/jenkins/test-data/3eb2eb4e-7df3-dd45-b018-332cf885bdba/WALs/c7c455b68129,45921,1733577789348/c7c455b68129%2C45921%2C1733577789348.1733577859860 is not closed yet, will try archiving it next time 2024-12-07T13:24:24,924 DEBUG [regionserver/c7c455b68129:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37739:37739),(127.0.0.1/127.0.0.1:35967:35967)] 2024-12-07T13:24:24,924 DEBUG [regionserver/c7c455b68129:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:33723/user/jenkins/test-data/3eb2eb4e-7df3-dd45-b018-332cf885bdba/WALs/c7c455b68129,45921,1733577789348/c7c455b68129%2C45921%2C1733577789348.1733577859860 is not closed yet, will try archiving it next time 2024-12-07T13:24:24,938 INFO [RS:0;c7c455b68129:45921-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9990b6f765d25fa8a30e2342f021d04a#info#compaction#3 average throughput is 10.77 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-07T13:24:24,940 DEBUG [RS:0;c7c455b68129:45921-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33723/user/jenkins/test-data/3eb2eb4e-7df3-dd45-b018-332cf885bdba/data/default/TestLogRolling-testSlowSyncLogRolling/9990b6f765d25fa8a30e2342f021d04a/.tmp/info/f9343a5b98054388897f35a845957cdd is 1080, key is row0001/info:/1733577802343/Put/seqid=0 2024-12-07T13:24:24,945 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43507 is added to blk_1073741848_1024 (size=27710) 2024-12-07T13:24:24,945 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41333 is added to blk_1073741848_1024 (size=27710) 2024-12-07T13:24:24,957 DEBUG [RS:0;c7c455b68129:45921-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33723/user/jenkins/test-data/3eb2eb4e-7df3-dd45-b018-332cf885bdba/data/default/TestLogRolling-testSlowSyncLogRolling/9990b6f765d25fa8a30e2342f021d04a/.tmp/info/f9343a5b98054388897f35a845957cdd as hdfs://localhost:33723/user/jenkins/test-data/3eb2eb4e-7df3-dd45-b018-332cf885bdba/data/default/TestLogRolling-testSlowSyncLogRolling/9990b6f765d25fa8a30e2342f021d04a/info/f9343a5b98054388897f35a845957cdd 2024-12-07T13:24:24,973 INFO [RS:0;c7c455b68129:45921-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 9990b6f765d25fa8a30e2342f021d04a/info of 9990b6f765d25fa8a30e2342f021d04a into f9343a5b98054388897f35a845957cdd(size=27.1 K), total size for store is 27.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-07T13:24:24,974 DEBUG [RS:0;c7c455b68129:45921-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 9990b6f765d25fa8a30e2342f021d04a: 2024-12-07T13:24:24,975 INFO [RS:0;c7c455b68129:45921-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testSlowSyncLogRolling,,1733577792250.9990b6f765d25fa8a30e2342f021d04a., storeName=9990b6f765d25fa8a30e2342f021d04a/info, priority=13, startTime=1733577864895; duration=0sec 2024-12-07T13:24:24,975 DEBUG [RS:0;c7c455b68129:45921-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-12-07T13:24:24,976 DEBUG [RS:0;c7c455b68129:45921-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-07T13:24:24,976 DEBUG [RS:0;c7c455b68129:45921-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:33723/user/jenkins/test-data/3eb2eb4e-7df3-dd45-b018-332cf885bdba/data/default/TestLogRolling-testSlowSyncLogRolling/9990b6f765d25fa8a30e2342f021d04a/info/f9343a5b98054388897f35a845957cdd because midkey is the same as first or last row 2024-12-07T13:24:24,976 DEBUG [RS:0;c7c455b68129:45921-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-12-07T13:24:24,976 DEBUG [RS:0;c7c455b68129:45921-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-07T13:24:24,976 DEBUG [RS:0;c7c455b68129:45921-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split 
hdfs://localhost:33723/user/jenkins/test-data/3eb2eb4e-7df3-dd45-b018-332cf885bdba/data/default/TestLogRolling-testSlowSyncLogRolling/9990b6f765d25fa8a30e2342f021d04a/info/f9343a5b98054388897f35a845957cdd because midkey is the same as first or last row 2024-12-07T13:24:24,976 DEBUG [RS:0;c7c455b68129:45921-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-12-07T13:24:24,976 DEBUG [RS:0;c7c455b68129:45921-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-07T13:24:24,976 DEBUG [RS:0;c7c455b68129:45921-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:33723/user/jenkins/test-data/3eb2eb4e-7df3-dd45-b018-332cf885bdba/data/default/TestLogRolling-testSlowSyncLogRolling/9990b6f765d25fa8a30e2342f021d04a/info/f9343a5b98054388897f35a845957cdd because midkey is the same as first or last row 2024-12-07T13:24:24,976 DEBUG [RS:0;c7c455b68129:45921-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T13:24:24,977 DEBUG [RS:0;c7c455b68129:45921-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9990b6f765d25fa8a30e2342f021d04a:info 2024-12-07T13:24:25,311 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:33723/user/jenkins/test-data/3eb2eb4e-7df3-dd45-b018-332cf885bdba/WALs/c7c455b68129,45921,1733577789348/c7c455b68129%2C45921%2C1733577789348.1733577859860 to hdfs://localhost:33723/user/jenkins/test-data/3eb2eb4e-7df3-dd45-b018-332cf885bdba/oldWALs/c7c455b68129%2C45921%2C1733577789348.1733577859860 2024-12-07T13:24:36,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45921 {}] regionserver.HRegion(8855): Flush requested on 9990b6f765d25fa8a30e2342f021d04a 2024-12-07T13:24:36,962 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 9990b6f765d25fa8a30e2342f021d04a 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-07T13:24:36,971 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33723/user/jenkins/test-data/3eb2eb4e-7df3-dd45-b018-332cf885bdba/data/default/TestLogRolling-testSlowSyncLogRolling/9990b6f765d25fa8a30e2342f021d04a/.tmp/info/ec3764b6f3644df8a4302e7b98960c06 is 1080, key is row0022/info:/1733577864909/Put/seqid=0 2024-12-07T13:24:36,977 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43507 is added to blk_1073741849_1025 (size=12509) 2024-12-07T13:24:36,977 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41333 is added to blk_1073741849_1025 (size=12509) 2024-12-07T13:24:36,978 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=42 (bloomFilter=true), to=hdfs://localhost:33723/user/jenkins/test-data/3eb2eb4e-7df3-dd45-b018-332cf885bdba/data/default/TestLogRolling-testSlowSyncLogRolling/9990b6f765d25fa8a30e2342f021d04a/.tmp/info/ec3764b6f3644df8a4302e7b98960c06 2024-12-07T13:24:36,987 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:33723/user/jenkins/test-data/3eb2eb4e-7df3-dd45-b018-332cf885bdba/data/default/TestLogRolling-testSlowSyncLogRolling/9990b6f765d25fa8a30e2342f021d04a/.tmp/info/ec3764b6f3644df8a4302e7b98960c06 as hdfs://localhost:33723/user/jenkins/test-data/3eb2eb4e-7df3-dd45-b018-332cf885bdba/data/default/TestLogRolling-testSlowSyncLogRolling/9990b6f765d25fa8a30e2342f021d04a/info/ec3764b6f3644df8a4302e7b98960c06 2024-12-07T13:24:36,995 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33723/user/jenkins/test-data/3eb2eb4e-7df3-dd45-b018-332cf885bdba/data/default/TestLogRolling-testSlowSyncLogRolling/9990b6f765d25fa8a30e2342f021d04a/info/ec3764b6f3644df8a4302e7b98960c06, entries=7, sequenceid=42, filesize=12.2 K 2024-12-07T13:24:36,997 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 9990b6f765d25fa8a30e2342f021d04a in 35ms, sequenceid=42, compaction requested=false 2024-12-07T13:24:36,997 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 9990b6f765d25fa8a30e2342f021d04a: 2024-12-07T13:24:36,997 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=39.3 K, sizeToCheck=16.0 K 2024-12-07T13:24:36,997 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-07T13:24:36,997 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:33723/user/jenkins/test-data/3eb2eb4e-7df3-dd45-b018-332cf885bdba/data/default/TestLogRolling-testSlowSyncLogRolling/9990b6f765d25fa8a30e2342f021d04a/info/f9343a5b98054388897f35a845957cdd because midkey is the same as first or last row 2024-12-07T13:24:37,181 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-07T13:24:43,441 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 9990b6f765d25fa8a30e2342f021d04a, had cached 0 bytes from a total of 40219 2024-12-07T13:24:45,033 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-12-07T13:24:45,034 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
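The split-policy entries above show the size check passing (sumSize=39.3 K against sizeToCheck=16.0 K) while the split is still refused because the candidate file's midkey equals its first or last row, which would leave one daughter region empty. A simplified sketch of those two predicates, with illustrative names only and none of the surrounding policy machinery, is:

import java.util.Arrays;

class SplitDecision {
  /** Size check, cf. "Should split because region size is big enough sumSize=..., sizeToCheck=...". */
  static boolean sizeSaysSplit(long sumStoreSizeBytes, long sizeToCheckBytes) {
    return sumStoreSizeBytes > sizeToCheckBytes;
  }

  /** Midkey guard, cf. "cannot split ... because midkey is the same as first or last row". */
  static boolean splittableAt(byte[] midKey, byte[] firstRowKey, byte[] lastRowKey) {
    return midKey != null
        && !Arrays.equals(midKey, firstRowKey)
        && !Arrays.equals(midKey, lastRowKey);
  }
}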
2024-12-07T13:24:45,034 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-07T13:24:45,040 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T13:24:45,041 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T13:24:45,041 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
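The call stack above is the usual JUnit teardown path: AbstractTestLogRolling.tearDown delegating to HBaseTestingUtil.shutdownMiniCluster, which closes the shared async connection and stops the minicluster. A minimal sketch of that lifecycle follows; the TEST_UTIL field name, the class name, and the setup method are assumptions, not the test's actual code.

import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.junit.After;
import org.junit.Before;

public class MiniClusterLifecycle {
  private final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();

  @Before
  public void setUp() throws Exception {
    TEST_UTIL.startMiniCluster();   // HDFS, ZooKeeper, master and regionserver, as in this log
  }

  @After
  public void tearDown() throws Exception {
    // Stops the HBase and DFS miniclusters; this is the call that produces the
    // "Shutting down minicluster" and connection-close entries above.
    TEST_UTIL.shutdownMiniCluster();
  }
}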
2024-12-07T13:24:45,041 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-07T13:24:45,042 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=888866681, stopped=false 2024-12-07T13:24:45,042 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=c7c455b68129,34945,1733577788070 2024-12-07T13:24:45,076 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45921-0x1000071ce200001, quorum=127.0.0.1:55327, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-07T13:24:45,076 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34945-0x1000071ce200000, quorum=127.0.0.1:55327, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-07T13:24:45,077 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45921-0x1000071ce200001, quorum=127.0.0.1:55327, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T13:24:45,077 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34945-0x1000071ce200000, quorum=127.0.0.1:55327, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T13:24:45,077 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-07T13:24:45,078 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-12-07T13:24:45,078 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at 
org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-07T13:24:45,079 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:34945-0x1000071ce200000, quorum=127.0.0.1:55327, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-07T13:24:45,079 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T13:24:45,079 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:45921-0x1000071ce200001, quorum=127.0.0.1:55327, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-07T13:24:45,079 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'c7c455b68129,45921,1733577789348' ***** 2024-12-07T13:24:45,080 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-07T13:24:45,080 INFO [RS:0;c7c455b68129:45921 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-07T13:24:45,080 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-07T13:24:45,080 INFO [RS:0;c7c455b68129:45921 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-07T13:24:45,081 INFO [RS:0;c7c455b68129:45921 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-07T13:24:45,081 INFO [RS:0;c7c455b68129:45921 {}] regionserver.HRegionServer(3091): Received CLOSE for 9990b6f765d25fa8a30e2342f021d04a 2024-12-07T13:24:45,081 INFO [RS:0;c7c455b68129:45921 {}] regionserver.HRegionServer(959): stopping server c7c455b68129,45921,1733577789348 2024-12-07T13:24:45,081 INFO [RS:0;c7c455b68129:45921 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-07T13:24:45,081 INFO [RS:0;c7c455b68129:45921 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;c7c455b68129:45921. 
2024-12-07T13:24:45,081 DEBUG [RS:0;c7c455b68129:45921 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-07T13:24:45,081 DEBUG [RS:0;c7c455b68129:45921 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T13:24:45,082 INFO [RS:0;c7c455b68129:45921 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-07T13:24:45,082 DEBUG [RS_CLOSE_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 9990b6f765d25fa8a30e2342f021d04a, disabling compactions & flushes 2024-12-07T13:24:45,082 INFO [RS:0;c7c455b68129:45921 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-07T13:24:45,082 INFO [RS_CLOSE_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testSlowSyncLogRolling,,1733577792250.9990b6f765d25fa8a30e2342f021d04a. 2024-12-07T13:24:45,082 INFO [RS:0;c7c455b68129:45921 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-07T13:24:45,082 DEBUG [RS_CLOSE_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testSlowSyncLogRolling,,1733577792250.9990b6f765d25fa8a30e2342f021d04a. 2024-12-07T13:24:45,082 DEBUG [RS_CLOSE_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testSlowSyncLogRolling,,1733577792250.9990b6f765d25fa8a30e2342f021d04a. after waiting 0 ms 2024-12-07T13:24:45,082 INFO [RS:0;c7c455b68129:45921 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-07T13:24:45,082 DEBUG [RS_CLOSE_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testSlowSyncLogRolling,,1733577792250.9990b6f765d25fa8a30e2342f021d04a. 
2024-12-07T13:24:45,082 INFO [RS_CLOSE_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing 9990b6f765d25fa8a30e2342f021d04a 1/1 column families, dataSize=3.15 KB heapSize=3.63 KB 2024-12-07T13:24:45,082 INFO [RS:0;c7c455b68129:45921 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-12-07T13:24:45,082 DEBUG [RS:0;c7c455b68129:45921 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, 9990b6f765d25fa8a30e2342f021d04a=TestLogRolling-testSlowSyncLogRolling,,1733577792250.9990b6f765d25fa8a30e2342f021d04a.} 2024-12-07T13:24:45,082 DEBUG [RS_CLOSE_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-07T13:24:45,082 INFO [RS_CLOSE_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-07T13:24:45,082 DEBUG [RS_CLOSE_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-07T13:24:45,082 DEBUG [RS_CLOSE_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-07T13:24:45,082 DEBUG [RS_CLOSE_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-07T13:24:45,082 DEBUG [RS:0;c7c455b68129:45921 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 9990b6f765d25fa8a30e2342f021d04a 2024-12-07T13:24:45,083 INFO [RS_CLOSE_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.65 KB heapSize=3.67 KB 2024-12-07T13:24:45,088 DEBUG [RS_CLOSE_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33723/user/jenkins/test-data/3eb2eb4e-7df3-dd45-b018-332cf885bdba/data/default/TestLogRolling-testSlowSyncLogRolling/9990b6f765d25fa8a30e2342f021d04a/.tmp/info/e7b30bb6c83c42f6b6f46d7d90ea8c24 is 1080, key is row0029/info:/1733577878967/Put/seqid=0 2024-12-07T13:24:45,108 DEBUG [RS_CLOSE_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33723/user/jenkins/test-data/3eb2eb4e-7df3-dd45-b018-332cf885bdba/data/hbase/meta/1588230740/.tmp/info/66e8dacc6e4c429eb189372a069defe0 is 195, key is TestLogRolling-testSlowSyncLogRolling,,1733577792250.9990b6f765d25fa8a30e2342f021d04a./info:regioninfo/1733577793464/Put/seqid=0 2024-12-07T13:24:45,111 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41333 is added to blk_1073741850_1026 (size=8193) 2024-12-07T13:24:45,111 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43507 is added to blk_1073741850_1026 (size=8193) 2024-12-07T13:24:45,112 INFO [RS_CLOSE_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.15 KB at sequenceid=48 (bloomFilter=true), 
to=hdfs://localhost:33723/user/jenkins/test-data/3eb2eb4e-7df3-dd45-b018-332cf885bdba/data/default/TestLogRolling-testSlowSyncLogRolling/9990b6f765d25fa8a30e2342f021d04a/.tmp/info/e7b30bb6c83c42f6b6f46d7d90ea8c24 2024-12-07T13:24:45,119 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41333 is added to blk_1073741851_1027 (size=7016) 2024-12-07T13:24:45,119 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43507 is added to blk_1073741851_1027 (size=7016) 2024-12-07T13:24:45,120 INFO [RS_CLOSE_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.45 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:33723/user/jenkins/test-data/3eb2eb4e-7df3-dd45-b018-332cf885bdba/data/hbase/meta/1588230740/.tmp/info/66e8dacc6e4c429eb189372a069defe0 2024-12-07T13:24:45,126 DEBUG [RS_CLOSE_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33723/user/jenkins/test-data/3eb2eb4e-7df3-dd45-b018-332cf885bdba/data/default/TestLogRolling-testSlowSyncLogRolling/9990b6f765d25fa8a30e2342f021d04a/.tmp/info/e7b30bb6c83c42f6b6f46d7d90ea8c24 as hdfs://localhost:33723/user/jenkins/test-data/3eb2eb4e-7df3-dd45-b018-332cf885bdba/data/default/TestLogRolling-testSlowSyncLogRolling/9990b6f765d25fa8a30e2342f021d04a/info/e7b30bb6c83c42f6b6f46d7d90ea8c24 2024-12-07T13:24:45,134 INFO [RS_CLOSE_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33723/user/jenkins/test-data/3eb2eb4e-7df3-dd45-b018-332cf885bdba/data/default/TestLogRolling-testSlowSyncLogRolling/9990b6f765d25fa8a30e2342f021d04a/info/e7b30bb6c83c42f6b6f46d7d90ea8c24, entries=3, sequenceid=48, filesize=8.0 K 2024-12-07T13:24:45,136 INFO [RS_CLOSE_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~3.15 KB/3228, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 9990b6f765d25fa8a30e2342f021d04a in 54ms, sequenceid=48, compaction requested=true 2024-12-07T13:24:45,137 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1733577792250.9990b6f765d25fa8a30e2342f021d04a.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:33723/user/jenkins/test-data/3eb2eb4e-7df3-dd45-b018-332cf885bdba/data/default/TestLogRolling-testSlowSyncLogRolling/9990b6f765d25fa8a30e2342f021d04a/info/e6c15068c4e74ad9af3feeb0531024a9, hdfs://localhost:33723/user/jenkins/test-data/3eb2eb4e-7df3-dd45-b018-332cf885bdba/data/default/TestLogRolling-testSlowSyncLogRolling/9990b6f765d25fa8a30e2342f021d04a/info/5716612b5de54473ac672e144dca7adb, hdfs://localhost:33723/user/jenkins/test-data/3eb2eb4e-7df3-dd45-b018-332cf885bdba/data/default/TestLogRolling-testSlowSyncLogRolling/9990b6f765d25fa8a30e2342f021d04a/info/ff9594b9e4c74828ab196247ccb4b9cd] to archive 2024-12-07T13:24:45,139 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1733577792250.9990b6f765d25fa8a30e2342f021d04a.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-12-07T13:24:45,142 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1733577792250.9990b6f765d25fa8a30e2342f021d04a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33723/user/jenkins/test-data/3eb2eb4e-7df3-dd45-b018-332cf885bdba/data/default/TestLogRolling-testSlowSyncLogRolling/9990b6f765d25fa8a30e2342f021d04a/info/e6c15068c4e74ad9af3feeb0531024a9 to hdfs://localhost:33723/user/jenkins/test-data/3eb2eb4e-7df3-dd45-b018-332cf885bdba/archive/data/default/TestLogRolling-testSlowSyncLogRolling/9990b6f765d25fa8a30e2342f021d04a/info/e6c15068c4e74ad9af3feeb0531024a9 2024-12-07T13:24:45,145 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1733577792250.9990b6f765d25fa8a30e2342f021d04a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33723/user/jenkins/test-data/3eb2eb4e-7df3-dd45-b018-332cf885bdba/data/default/TestLogRolling-testSlowSyncLogRolling/9990b6f765d25fa8a30e2342f021d04a/info/5716612b5de54473ac672e144dca7adb to hdfs://localhost:33723/user/jenkins/test-data/3eb2eb4e-7df3-dd45-b018-332cf885bdba/archive/data/default/TestLogRolling-testSlowSyncLogRolling/9990b6f765d25fa8a30e2342f021d04a/info/5716612b5de54473ac672e144dca7adb 2024-12-07T13:24:45,145 DEBUG [RS_CLOSE_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33723/user/jenkins/test-data/3eb2eb4e-7df3-dd45-b018-332cf885bdba/data/hbase/meta/1588230740/.tmp/ns/7ac0af9a2c074fc08d6bdd2b547a670e is 43, key is default/ns:d/1733577792013/Put/seqid=0 2024-12-07T13:24:45,146 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1733577792250.9990b6f765d25fa8a30e2342f021d04a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33723/user/jenkins/test-data/3eb2eb4e-7df3-dd45-b018-332cf885bdba/data/default/TestLogRolling-testSlowSyncLogRolling/9990b6f765d25fa8a30e2342f021d04a/info/ff9594b9e4c74828ab196247ccb4b9cd to hdfs://localhost:33723/user/jenkins/test-data/3eb2eb4e-7df3-dd45-b018-332cf885bdba/archive/data/default/TestLogRolling-testSlowSyncLogRolling/9990b6f765d25fa8a30e2342f021d04a/info/ff9594b9e4c74828ab196247ccb4b9cd 2024-12-07T13:24:45,150 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43507 is added to blk_1073741852_1028 (size=5153) 2024-12-07T13:24:45,151 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41333 is added to blk_1073741852_1028 (size=5153) 2024-12-07T13:24:45,151 INFO [RS_CLOSE_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:33723/user/jenkins/test-data/3eb2eb4e-7df3-dd45-b018-332cf885bdba/data/hbase/meta/1588230740/.tmp/ns/7ac0af9a2c074fc08d6bdd2b547a670e 2024-12-07T13:24:45,160 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1733577792250.9990b6f765d25fa8a30e2342f021d04a.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=c7c455b68129:34945 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] 
at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 
16 more 2024-12-07T13:24:45,164 WARN [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1733577792250.9990b6f765d25fa8a30e2342f021d04a.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [e6c15068c4e74ad9af3feeb0531024a9=12509, 5716612b5de54473ac672e144dca7adb=12509, ff9594b9e4c74828ab196247ccb4b9cd=12509] 2024-12-07T13:24:45,169 DEBUG [RS_CLOSE_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33723/user/jenkins/test-data/3eb2eb4e-7df3-dd45-b018-332cf885bdba/data/default/TestLogRolling-testSlowSyncLogRolling/9990b6f765d25fa8a30e2342f021d04a/recovered.edits/51.seqid, newMaxSeqId=51, maxSeqId=1 2024-12-07T13:24:45,172 INFO [RS_CLOSE_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testSlowSyncLogRolling,,1733577792250.9990b6f765d25fa8a30e2342f021d04a. 2024-12-07T13:24:45,172 DEBUG [RS_CLOSE_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 9990b6f765d25fa8a30e2342f021d04a: Waiting for close lock at 1733577885081Running coprocessor pre-close hooks at 1733577885082 (+1 ms)Disabling compacts and flushes for region at 1733577885082Disabling writes for close at 1733577885082Obtaining lock to block concurrent updates at 1733577885082Preparing flush snapshotting stores in 9990b6f765d25fa8a30e2342f021d04a at 1733577885082Finished memstore snapshotting TestLogRolling-testSlowSyncLogRolling,,1733577792250.9990b6f765d25fa8a30e2342f021d04a., syncing WAL and waiting on mvcc, flushsize=dataSize=3228, getHeapSize=3696, getOffHeapSize=0, getCellsCount=3 at 1733577885082Flushing stores of TestLogRolling-testSlowSyncLogRolling,,1733577792250.9990b6f765d25fa8a30e2342f021d04a. at 1733577885083 (+1 ms)Flushing 9990b6f765d25fa8a30e2342f021d04a/info: creating writer at 1733577885083Flushing 9990b6f765d25fa8a30e2342f021d04a/info: appending metadata at 1733577885088 (+5 ms)Flushing 9990b6f765d25fa8a30e2342f021d04a/info: closing flushed file at 1733577885088Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6f9547d5: reopening flushed file at 1733577885125 (+37 ms)Finished flush of dataSize ~3.15 KB/3228, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 9990b6f765d25fa8a30e2342f021d04a in 54ms, sequenceid=48, compaction requested=true at 1733577885136 (+11 ms)Writing region close event to WAL at 1733577885165 (+29 ms)Running coprocessor post-close hooks at 1733577885170 (+5 ms)Closed at 1733577885172 (+2 ms) 2024-12-07T13:24:45,172 DEBUG [RS_CLOSE_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testSlowSyncLogRolling,,1733577792250.9990b6f765d25fa8a30e2342f021d04a. 
2024-12-07T13:24:45,175 DEBUG [RS_CLOSE_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33723/user/jenkins/test-data/3eb2eb4e-7df3-dd45-b018-332cf885bdba/data/hbase/meta/1588230740/.tmp/table/752d1a48db524ff69346f4b2543b278b is 73, key is TestLogRolling-testSlowSyncLogRolling/table:state/1733577793481/Put/seqid=0 2024-12-07T13:24:45,177 INFO [regionserver/c7c455b68129:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-07T13:24:45,181 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41333 is added to blk_1073741853_1029 (size=5396) 2024-12-07T13:24:45,181 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43507 is added to blk_1073741853_1029 (size=5396) 2024-12-07T13:24:45,182 INFO [RS_CLOSE_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=138 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:33723/user/jenkins/test-data/3eb2eb4e-7df3-dd45-b018-332cf885bdba/data/hbase/meta/1588230740/.tmp/table/752d1a48db524ff69346f4b2543b278b 2024-12-07T13:24:45,190 DEBUG [RS_CLOSE_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33723/user/jenkins/test-data/3eb2eb4e-7df3-dd45-b018-332cf885bdba/data/hbase/meta/1588230740/.tmp/info/66e8dacc6e4c429eb189372a069defe0 as hdfs://localhost:33723/user/jenkins/test-data/3eb2eb4e-7df3-dd45-b018-332cf885bdba/data/hbase/meta/1588230740/info/66e8dacc6e4c429eb189372a069defe0 2024-12-07T13:24:45,198 INFO [RS_CLOSE_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33723/user/jenkins/test-data/3eb2eb4e-7df3-dd45-b018-332cf885bdba/data/hbase/meta/1588230740/info/66e8dacc6e4c429eb189372a069defe0, entries=10, sequenceid=11, filesize=6.9 K 2024-12-07T13:24:45,199 DEBUG [RS_CLOSE_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33723/user/jenkins/test-data/3eb2eb4e-7df3-dd45-b018-332cf885bdba/data/hbase/meta/1588230740/.tmp/ns/7ac0af9a2c074fc08d6bdd2b547a670e as hdfs://localhost:33723/user/jenkins/test-data/3eb2eb4e-7df3-dd45-b018-332cf885bdba/data/hbase/meta/1588230740/ns/7ac0af9a2c074fc08d6bdd2b547a670e 2024-12-07T13:24:45,206 INFO [RS_CLOSE_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33723/user/jenkins/test-data/3eb2eb4e-7df3-dd45-b018-332cf885bdba/data/hbase/meta/1588230740/ns/7ac0af9a2c074fc08d6bdd2b547a670e, entries=2, sequenceid=11, filesize=5.0 K 2024-12-07T13:24:45,208 DEBUG [RS_CLOSE_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33723/user/jenkins/test-data/3eb2eb4e-7df3-dd45-b018-332cf885bdba/data/hbase/meta/1588230740/.tmp/table/752d1a48db524ff69346f4b2543b278b as hdfs://localhost:33723/user/jenkins/test-data/3eb2eb4e-7df3-dd45-b018-332cf885bdba/data/hbase/meta/1588230740/table/752d1a48db524ff69346f4b2543b278b 2024-12-07T13:24:45,215 INFO [RS_CLOSE_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:33723/user/jenkins/test-data/3eb2eb4e-7df3-dd45-b018-332cf885bdba/data/hbase/meta/1588230740/table/752d1a48db524ff69346f4b2543b278b, entries=2, sequenceid=11, filesize=5.3 K 2024-12-07T13:24:45,216 INFO [RS_CLOSE_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.65 KB/1692, heapSize ~3.38 KB/3456, currentSize=0 B/0 for 1588230740 in 134ms, sequenceid=11, compaction requested=false 2024-12-07T13:24:45,221 DEBUG [RS_CLOSE_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33723/user/jenkins/test-data/3eb2eb4e-7df3-dd45-b018-332cf885bdba/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-12-07T13:24:45,222 DEBUG [RS_CLOSE_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-07T13:24:45,222 INFO [RS_CLOSE_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-07T13:24:45,222 DEBUG [RS_CLOSE_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733577885082Running coprocessor pre-close hooks at 1733577885082Disabling compacts and flushes for region at 1733577885082Disabling writes for close at 1733577885082Obtaining lock to block concurrent updates at 1733577885083 (+1 ms)Preparing flush snapshotting stores in 1588230740 at 1733577885083Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1692, getHeapSize=3696, getOffHeapSize=0, getCellsCount=14 at 1733577885083Flushing stores of hbase:meta,,1.1588230740 at 1733577885084 (+1 ms)Flushing 1588230740/info: creating writer at 1733577885084Flushing 1588230740/info: appending metadata at 1733577885108 (+24 ms)Flushing 1588230740/info: closing flushed file at 1733577885108Flushing 1588230740/ns: creating writer at 1733577885129 (+21 ms)Flushing 1588230740/ns: appending metadata at 1733577885144 (+15 ms)Flushing 1588230740/ns: closing flushed file at 1733577885144Flushing 1588230740/table: creating writer at 1733577885159 (+15 ms)Flushing 1588230740/table: appending metadata at 1733577885175 (+16 ms)Flushing 1588230740/table: closing flushed file at 1733577885175Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@65e6b873: reopening flushed file at 1733577885189 (+14 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7bba3a9d: reopening flushed file at 1733577885198 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@fb3b213: reopening flushed file at 1733577885207 (+9 ms)Finished flush of dataSize ~1.65 KB/1692, heapSize ~3.38 KB/3456, currentSize=0 B/0 for 1588230740 in 134ms, sequenceid=11, compaction requested=false at 1733577885216 (+9 ms)Writing region close event to WAL at 1733577885217 (+1 ms)Running coprocessor post-close hooks at 1733577885222 (+5 ms)Closed at 1733577885222 2024-12-07T13:24:45,223 DEBUG [RS_CLOSE_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-07T13:24:45,264 INFO [regionserver/c7c455b68129:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-12-07T13:24:45,265 INFO 
[regionserver/c7c455b68129:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-12-07T13:24:45,283 INFO [RS:0;c7c455b68129:45921 {}] regionserver.HRegionServer(976): stopping server c7c455b68129,45921,1733577789348; all regions closed. 2024-12-07T13:24:45,286 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:24:45,286 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:24:45,287 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:24:45,287 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:24:45,287 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:24:45,293 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43507 is added to blk_1073741834_1010 (size=3066) 2024-12-07T13:24:45,294 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41333 is added to blk_1073741834_1010 (size=3066) 2024-12-07T13:24:45,298 DEBUG [RS:0;c7c455b68129:45921 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/3eb2eb4e-7df3-dd45-b018-332cf885bdba/oldWALs 2024-12-07T13:24:45,298 INFO [RS:0;c7c455b68129:45921 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog c7c455b68129%2C45921%2C1733577789348.meta:.meta(num 1733577791841) 2024-12-07T13:24:45,299 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:24:45,299 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:24:45,299 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:24:45,300 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:24:45,300 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:24:45,302 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41333 is added to blk_1073741847_1023 (size=12695) 2024-12-07T13:24:45,303 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43507 is added to blk_1073741847_1023 (size=12695) 2024-12-07T13:24:45,307 DEBUG [RS:0;c7c455b68129:45921 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/3eb2eb4e-7df3-dd45-b018-332cf885bdba/oldWALs 2024-12-07T13:24:45,307 INFO [RS:0;c7c455b68129:45921 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog c7c455b68129%2C45921%2C1733577789348:(num 1733577864908) 2024-12-07T13:24:45,307 DEBUG [RS:0;c7c455b68129:45921 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T13:24:45,307 INFO [RS:0;c7c455b68129:45921 {}] regionserver.LeaseManager(133): Closed leases 2024-12-07T13:24:45,307 INFO [RS:0;c7c455b68129:45921 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-07T13:24:45,308 INFO [RS:0;c7c455b68129:45921 {}] hbase.ChoreService(370): Chore service for: regionserver/c7c455b68129:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-12-07T13:24:45,308 INFO [RS:0;c7c455b68129:45921 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-07T13:24:45,308 INFO [regionserver/c7c455b68129:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-07T13:24:45,309 INFO [RS:0;c7c455b68129:45921 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:45921 2024-12-07T13:24:45,323 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34945-0x1000071ce200000, quorum=127.0.0.1:55327, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-07T13:24:45,323 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45921-0x1000071ce200001, quorum=127.0.0.1:55327, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/c7c455b68129,45921,1733577789348 2024-12-07T13:24:45,323 INFO [RS:0;c7c455b68129:45921 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-07T13:24:45,324 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [c7c455b68129,45921,1733577789348] 2024-12-07T13:24:45,344 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/c7c455b68129,45921,1733577789348 already deleted, retry=false 2024-12-07T13:24:45,344 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; c7c455b68129,45921,1733577789348 expired; onlineServers=0 2024-12-07T13:24:45,344 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'c7c455b68129,34945,1733577788070' ***** 2024-12-07T13:24:45,344 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-07T13:24:45,345 INFO [M:0;c7c455b68129:34945 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-07T13:24:45,345 INFO [M:0;c7c455b68129:34945 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-07T13:24:45,345 DEBUG [M:0;c7c455b68129:34945 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-07T13:24:45,346 DEBUG [M:0;c7c455b68129:34945 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-07T13:24:45,346 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-12-07T13:24:45,346 DEBUG [master/c7c455b68129:0:becomeActiveMaster-HFileCleaner.small.0-1733577791059 {}] cleaner.HFileCleaner(306): Exit Thread[master/c7c455b68129:0:becomeActiveMaster-HFileCleaner.small.0-1733577791059,5,FailOnTimeoutGroup] 2024-12-07T13:24:45,346 DEBUG [master/c7c455b68129:0:becomeActiveMaster-HFileCleaner.large.0-1733577791058 {}] cleaner.HFileCleaner(306): Exit Thread[master/c7c455b68129:0:becomeActiveMaster-HFileCleaner.large.0-1733577791058,5,FailOnTimeoutGroup] 2024-12-07T13:24:45,346 INFO [M:0;c7c455b68129:34945 {}] hbase.ChoreService(370): Chore service for: master/c7c455b68129:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-12-07T13:24:45,346 INFO [M:0;c7c455b68129:34945 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-07T13:24:45,347 DEBUG [M:0;c7c455b68129:34945 {}] master.HMaster(1795): Stopping service threads 2024-12-07T13:24:45,347 INFO [M:0;c7c455b68129:34945 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-07T13:24:45,347 INFO [M:0;c7c455b68129:34945 {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-07T13:24:45,348 INFO [M:0;c7c455b68129:34945 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-07T13:24:45,348 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-07T13:24:45,354 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34945-0x1000071ce200000, quorum=127.0.0.1:55327, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-07T13:24:45,354 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34945-0x1000071ce200000, quorum=127.0.0.1:55327, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T13:24:45,355 DEBUG [M:0;c7c455b68129:34945 {}] zookeeper.ZKUtil(347): master:34945-0x1000071ce200000, quorum=127.0.0.1:55327, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-07T13:24:45,355 WARN [M:0;c7c455b68129:34945 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-07T13:24:45,356 INFO [M:0;c7c455b68129:34945 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:33723/user/jenkins/test-data/3eb2eb4e-7df3-dd45-b018-332cf885bdba/.lastflushedseqids 2024-12-07T13:24:45,373 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43507 is added to blk_1073741854_1030 (size=130) 2024-12-07T13:24:45,373 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41333 is added to blk_1073741854_1030 (size=130) 2024-12-07T13:24:45,374 INFO [M:0;c7c455b68129:34945 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-12-07T13:24:45,374 INFO [M:0;c7c455b68129:34945 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-07T13:24:45,374 DEBUG [M:0;c7c455b68129:34945 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-07T13:24:45,374 INFO [M:0;c7c455b68129:34945 {}] 
regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-07T13:24:45,374 DEBUG [M:0;c7c455b68129:34945 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-07T13:24:45,374 DEBUG [M:0;c7c455b68129:34945 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-07T13:24:45,374 DEBUG [M:0;c7c455b68129:34945 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-07T13:24:45,375 INFO [M:0;c7c455b68129:34945 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.01 KB heapSize=29.18 KB 2024-12-07T13:24:45,390 DEBUG [M:0;c7c455b68129:34945 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33723/user/jenkins/test-data/3eb2eb4e-7df3-dd45-b018-332cf885bdba/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/94503c6b2f8a4995981dd10aafaaef26 is 82, key is hbase:meta,,1/info:regioninfo/1733577791909/Put/seqid=0 2024-12-07T13:24:45,396 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41333 is added to blk_1073741855_1031 (size=5672) 2024-12-07T13:24:45,396 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43507 is added to blk_1073741855_1031 (size=5672) 2024-12-07T13:24:45,396 INFO [M:0;c7c455b68129:34945 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:33723/user/jenkins/test-data/3eb2eb4e-7df3-dd45-b018-332cf885bdba/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/94503c6b2f8a4995981dd10aafaaef26 2024-12-07T13:24:45,417 DEBUG [M:0;c7c455b68129:34945 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33723/user/jenkins/test-data/3eb2eb4e-7df3-dd45-b018-332cf885bdba/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/e6e22561b9044678b729a42c5a9d63ba is 765, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1733577793489/Put/seqid=0 2024-12-07T13:24:45,422 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43507 is added to blk_1073741856_1032 (size=6246) 2024-12-07T13:24:45,422 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41333 is added to blk_1073741856_1032 (size=6246) 2024-12-07T13:24:45,434 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45921-0x1000071ce200001, quorum=127.0.0.1:55327, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-07T13:24:45,434 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45921-0x1000071ce200001, quorum=127.0.0.1:55327, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-07T13:24:45,434 INFO [RS:0;c7c455b68129:45921 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-07T13:24:45,434 INFO [RS:0;c7c455b68129:45921 {}] regionserver.HRegionServer(1031): Exiting; stopping=c7c455b68129,45921,1733577789348; zookeeper connection closed. 
2024-12-07T13:24:45,435 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@4e05b673 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@4e05b673 2024-12-07T13:24:45,435 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-12-07T13:24:45,824 INFO [M:0;c7c455b68129:34945 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.41 KB at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:33723/user/jenkins/test-data/3eb2eb4e-7df3-dd45-b018-332cf885bdba/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/e6e22561b9044678b729a42c5a9d63ba 2024-12-07T13:24:45,838 INFO [M:0;c7c455b68129:34945 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for e6e22561b9044678b729a42c5a9d63ba 2024-12-07T13:24:45,854 DEBUG [M:0;c7c455b68129:34945 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33723/user/jenkins/test-data/3eb2eb4e-7df3-dd45-b018-332cf885bdba/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/c9831f73a2834767a0477a9fdf76d8ad is 69, key is c7c455b68129,45921,1733577789348/rs:state/1733577791093/Put/seqid=0 2024-12-07T13:24:45,863 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41333 is added to blk_1073741857_1033 (size=5156) 2024-12-07T13:24:45,863 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43507 is added to blk_1073741857_1033 (size=5156) 2024-12-07T13:24:45,864 INFO [M:0;c7c455b68129:34945 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:33723/user/jenkins/test-data/3eb2eb4e-7df3-dd45-b018-332cf885bdba/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/c9831f73a2834767a0477a9fdf76d8ad 2024-12-07T13:24:45,883 DEBUG [M:0;c7c455b68129:34945 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33723/user/jenkins/test-data/3eb2eb4e-7df3-dd45-b018-332cf885bdba/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/2707733b18494b17a5e5c3d9e0a5672f is 52, key is load_balancer_on/state:d/1733577792233/Put/seqid=0 2024-12-07T13:24:45,888 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41333 is added to blk_1073741858_1034 (size=5056) 2024-12-07T13:24:45,888 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43507 is added to blk_1073741858_1034 (size=5056) 2024-12-07T13:24:45,889 INFO [M:0;c7c455b68129:34945 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:33723/user/jenkins/test-data/3eb2eb4e-7df3-dd45-b018-332cf885bdba/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/2707733b18494b17a5e5c3d9e0a5672f 2024-12-07T13:24:45,895 DEBUG [M:0;c7c455b68129:34945 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33723/user/jenkins/test-data/3eb2eb4e-7df3-dd45-b018-332cf885bdba/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/94503c6b2f8a4995981dd10aafaaef26 as hdfs://localhost:33723/user/jenkins/test-data/3eb2eb4e-7df3-dd45-b018-332cf885bdba/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/94503c6b2f8a4995981dd10aafaaef26 
2024-12-07T13:24:45,901 INFO [M:0;c7c455b68129:34945 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33723/user/jenkins/test-data/3eb2eb4e-7df3-dd45-b018-332cf885bdba/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/94503c6b2f8a4995981dd10aafaaef26, entries=8, sequenceid=59, filesize=5.5 K 2024-12-07T13:24:45,903 DEBUG [M:0;c7c455b68129:34945 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33723/user/jenkins/test-data/3eb2eb4e-7df3-dd45-b018-332cf885bdba/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/e6e22561b9044678b729a42c5a9d63ba as hdfs://localhost:33723/user/jenkins/test-data/3eb2eb4e-7df3-dd45-b018-332cf885bdba/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/e6e22561b9044678b729a42c5a9d63ba 2024-12-07T13:24:45,909 INFO [M:0;c7c455b68129:34945 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for e6e22561b9044678b729a42c5a9d63ba 2024-12-07T13:24:45,909 INFO [M:0;c7c455b68129:34945 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33723/user/jenkins/test-data/3eb2eb4e-7df3-dd45-b018-332cf885bdba/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/e6e22561b9044678b729a42c5a9d63ba, entries=6, sequenceid=59, filesize=6.1 K 2024-12-07T13:24:45,910 DEBUG [M:0;c7c455b68129:34945 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33723/user/jenkins/test-data/3eb2eb4e-7df3-dd45-b018-332cf885bdba/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/c9831f73a2834767a0477a9fdf76d8ad as hdfs://localhost:33723/user/jenkins/test-data/3eb2eb4e-7df3-dd45-b018-332cf885bdba/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/c9831f73a2834767a0477a9fdf76d8ad 2024-12-07T13:24:45,916 INFO [M:0;c7c455b68129:34945 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33723/user/jenkins/test-data/3eb2eb4e-7df3-dd45-b018-332cf885bdba/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/c9831f73a2834767a0477a9fdf76d8ad, entries=1, sequenceid=59, filesize=5.0 K 2024-12-07T13:24:45,917 DEBUG [M:0;c7c455b68129:34945 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33723/user/jenkins/test-data/3eb2eb4e-7df3-dd45-b018-332cf885bdba/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/2707733b18494b17a5e5c3d9e0a5672f as hdfs://localhost:33723/user/jenkins/test-data/3eb2eb4e-7df3-dd45-b018-332cf885bdba/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/2707733b18494b17a5e5c3d9e0a5672f 2024-12-07T13:24:45,924 INFO [M:0;c7c455b68129:34945 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33723/user/jenkins/test-data/3eb2eb4e-7df3-dd45-b018-332cf885bdba/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/2707733b18494b17a5e5c3d9e0a5672f, entries=1, sequenceid=59, filesize=4.9 K 2024-12-07T13:24:45,925 INFO [M:0;c7c455b68129:34945 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.01 KB/23564, heapSize ~29.12 KB/29816, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 551ms, sequenceid=59, compaction requested=false 2024-12-07T13:24:45,927 INFO [M:0;c7c455b68129:34945 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-07T13:24:45,927 DEBUG [M:0;c7c455b68129:34945 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733577885374Disabling compacts and flushes for region at 1733577885374Disabling writes for close at 1733577885374Obtaining lock to block concurrent updates at 1733577885375 (+1 ms)Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733577885375Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23564, getHeapSize=29816, getOffHeapSize=0, getCellsCount=70 at 1733577885375Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1733577885376 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733577885376Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733577885390 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733577885390Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733577885402 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733577885416 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733577885416Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733577885838 (+422 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733577885853 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733577885853Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1733577885869 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1733577885882 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1733577885882Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3609ff7c: reopening flushed file at 1733577885894 (+12 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@61795b8c: reopening flushed file at 1733577885901 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@60bd577c: reopening flushed file at 1733577885909 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2aecde05: reopening flushed file at 1733577885916 (+7 ms)Finished flush of dataSize ~23.01 KB/23564, heapSize ~29.12 KB/29816, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 551ms, sequenceid=59, compaction requested=false at 1733577885925 (+9 ms)Writing region close event to WAL at 1733577885926 (+1 ms)Closed at 1733577885926 2024-12-07T13:24:45,927 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:24:45,927 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:24:45,928 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:24:45,928 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:24:45,928 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:24:45,930 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41333 is added to blk_1073741830_1006 (size=27961) 2024-12-07T13:24:45,930 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43507 is added to blk_1073741830_1006 (size=27961) 2024-12-07T13:24:45,931 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-07T13:24:45,931 INFO [M:0;c7c455b68129:34945 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-12-07T13:24:45,931 INFO [M:0;c7c455b68129:34945 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:34945 2024-12-07T13:24:45,931 INFO [M:0;c7c455b68129:34945 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-07T13:24:46,071 INFO [M:0;c7c455b68129:34945 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-07T13:24:46,071 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34945-0x1000071ce200000, quorum=127.0.0.1:55327, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-07T13:24:46,071 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34945-0x1000071ce200000, quorum=127.0.0.1:55327, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-07T13:24:46,110 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1bf97579{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-07T13:24:46,112 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@22b88bcb{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-07T13:24:46,112 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-07T13:24:46,112 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2d48d695{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-07T13:24:46,112 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@11effdcd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7ca13325-5db5-49d1-0537-37ce6f9e6872/hadoop.log.dir/,STOPPED} 2024-12-07T13:24:46,115 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-07T13:24:46,115 WARN [BP-1090316692-172.17.0.3-1733577784020 heartbeating to localhost/127.0.0.1:33723 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-07T13:24:46,115 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-07T13:24:46,115 WARN [BP-1090316692-172.17.0.3-1733577784020 heartbeating to localhost/127.0.0.1:33723 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1090316692-172.17.0.3-1733577784020 (Datanode Uuid 37addea5-ca25-47c3-9bd4-4cccea3bd8fd) service to localhost/127.0.0.1:33723 2024-12-07T13:24:46,117 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7ca13325-5db5-49d1-0537-37ce6f9e6872/cluster_6a2d92fc-30de-1c20-275d-95db6869af63/data/data3/current/BP-1090316692-172.17.0.3-1733577784020 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-07T13:24:46,117 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7ca13325-5db5-49d1-0537-37ce6f9e6872/cluster_6a2d92fc-30de-1c20-275d-95db6869af63/data/data4/current/BP-1090316692-172.17.0.3-1733577784020 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-07T13:24:46,118 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-07T13:24:46,120 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7b07d1ba{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-07T13:24:46,121 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@43e0a762{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-07T13:24:46,121 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-07T13:24:46,121 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@371e191c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-07T13:24:46,121 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@28778f0f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7ca13325-5db5-49d1-0537-37ce6f9e6872/hadoop.log.dir/,STOPPED} 2024-12-07T13:24:46,122 WARN [BP-1090316692-172.17.0.3-1733577784020 heartbeating to localhost/127.0.0.1:33723 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-07T13:24:46,122 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-07T13:24:46,122 WARN [BP-1090316692-172.17.0.3-1733577784020 heartbeating to localhost/127.0.0.1:33723 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1090316692-172.17.0.3-1733577784020 (Datanode Uuid b936396c-8d64-4fa5-af0b-7ee10e6fbff6) service to localhost/127.0.0.1:33723 2024-12-07T13:24:46,123 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-07T13:24:46,123 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7ca13325-5db5-49d1-0537-37ce6f9e6872/cluster_6a2d92fc-30de-1c20-275d-95db6869af63/data/data1/current/BP-1090316692-172.17.0.3-1733577784020 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-07T13:24:46,123 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7ca13325-5db5-49d1-0537-37ce6f9e6872/cluster_6a2d92fc-30de-1c20-275d-95db6869af63/data/data2/current/BP-1090316692-172.17.0.3-1733577784020 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-07T13:24:46,124 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-07T13:24:46,135 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@735fa16a{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-07T13:24:46,136 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6c26a5a3{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-07T13:24:46,136 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-07T13:24:46,136 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@70be1389{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-07T13:24:46,136 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@ddc8467{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7ca13325-5db5-49d1-0537-37ce6f9e6872/hadoop.log.dir/,STOPPED} 2024-12-07T13:24:46,145 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-12-07T13:24:46,177 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-12-07T13:24:46,184 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testSlowSyncLogRolling Thread=77 (was 12) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:33723 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RpcClient-timer-pool-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: HMaster-EventLoopGroup-1-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: region-location-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: 
nioEventLoopGroup-5-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-1 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging 
thread: LeaseRenewer:jenkins@localhost:33723 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-3-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-4-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Monitor thread for TaskMonitor java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:33723 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: SSL Certificates Store Monitor java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:33723 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Potentially hanging thread: master/c7c455b68129:0:becomeActiveMaster-MemStoreChunkPool Statistics java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: 
RPCClient-NioEventLoopGroup-4-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: master/c7c455b68129:0:becomeActiveMaster-MemStoreChunkPool Statistics java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:33723 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-5-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: nioEventLoopGroup-3-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Idle-Rpc-Conn-Sweeper-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: HMaster-EventLoopGroup-1-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: Time-limited test.named-queue-events-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) 
app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: SnapshotHandlerChoreCleaner java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HBase-Metrics2-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-1-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: nioEventLoopGroup-4-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: SessionTracker java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-5-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-2-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:33723 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-3-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: regionserver/c7c455b68129:0.procedureResultReporter java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Potentially hanging thread: Timer for 'HBase' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: nioEventLoopGroup-2-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-4 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: weak-ref-cleaner-strictcontextstorage java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: nioEventLoopGroup-2-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.0@localhost:33723 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-4-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: org.apache.hadoop.hdfs.PeerCache@efc635d java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async-Client-Retry-Timer-pool-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:33723 from jenkins.hfs.0 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) - Thread LEAK? -, OpenFileDescriptor=402 (was 287) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=156 (was 147) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=16069 (was 16547) 2024-12-07T13:24:46,190 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnDatanodeDeath Thread=78, OpenFileDescriptor=402, MaxFileDescriptor=1048576, SystemLoadAverage=156, ProcessCount=11, AvailableMemoryMB=16068 2024-12-07T13:24:46,191 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-07T13:24:46,191 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7ca13325-5db5-49d1-0537-37ce6f9e6872/hadoop.log.dir so I do NOT create it in target/test-data/f061fd5c-570e-ad23-4fbc-dc4aad96aa74 2024-12-07T13:24:46,191 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7ca13325-5db5-49d1-0537-37ce6f9e6872/hadoop.tmp.dir so I do NOT create it in target/test-data/f061fd5c-570e-ad23-4fbc-dc4aad96aa74 2024-12-07T13:24:46,191 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f061fd5c-570e-ad23-4fbc-dc4aad96aa74/cluster_2a49fc76-c495-b51f-762a-43cb5e526e92, deleteOnExit=true 2024-12-07T13:24:46,191 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-12-07T13:24:46,191 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f061fd5c-570e-ad23-4fbc-dc4aad96aa74/test.cache.data in system properties and HBase conf 2024-12-07T13:24:46,192 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f061fd5c-570e-ad23-4fbc-dc4aad96aa74/hadoop.tmp.dir in system properties and HBase conf 2024-12-07T13:24:46,192 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f061fd5c-570e-ad23-4fbc-dc4aad96aa74/hadoop.log.dir in system properties and HBase conf 2024-12-07T13:24:46,192 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f061fd5c-570e-ad23-4fbc-dc4aad96aa74/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-07T13:24:46,192 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f061fd5c-570e-ad23-4fbc-dc4aad96aa74/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-07T13:24:46,192 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-12-07T13:24:46,192 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering 2024-12-07T13:24:46,192 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f061fd5c-570e-ad23-4fbc-dc4aad96aa74/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-07T13:24:46,192 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f061fd5c-570e-ad23-4fbc-dc4aad96aa74/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-07T13:24:46,193 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f061fd5c-570e-ad23-4fbc-dc4aad96aa74/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-07T13:24:46,193 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f061fd5c-570e-ad23-4fbc-dc4aad96aa74/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-07T13:24:46,193 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f061fd5c-570e-ad23-4fbc-dc4aad96aa74/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-07T13:24:46,193 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f061fd5c-570e-ad23-4fbc-dc4aad96aa74/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-07T13:24:46,193 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f061fd5c-570e-ad23-4fbc-dc4aad96aa74/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-07T13:24:46,193 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting 
dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f061fd5c-570e-ad23-4fbc-dc4aad96aa74/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-07T13:24:46,193 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f061fd5c-570e-ad23-4fbc-dc4aad96aa74/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-07T13:24:46,193 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f061fd5c-570e-ad23-4fbc-dc4aad96aa74/nfs.dump.dir in system properties and HBase conf 2024-12-07T13:24:46,193 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f061fd5c-570e-ad23-4fbc-dc4aad96aa74/java.io.tmpdir in system properties and HBase conf 2024-12-07T13:24:46,193 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f061fd5c-570e-ad23-4fbc-dc4aad96aa74/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-07T13:24:46,194 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f061fd5c-570e-ad23-4fbc-dc4aad96aa74/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-07T13:24:46,194 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f061fd5c-570e-ad23-4fbc-dc4aad96aa74/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-07T13:24:46,207 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-07T13:24:46,591 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-07T13:24:46,597 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-07T13:24:46,599 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-07T13:24:46,599 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-07T13:24:46,599 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-07T13:24:46,599 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-07T13:24:46,600 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@54b8bf96{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f061fd5c-570e-ad23-4fbc-dc4aad96aa74/hadoop.log.dir/,AVAILABLE} 2024-12-07T13:24:46,600 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@737d6115{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-07T13:24:46,691 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@55cb1221{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f061fd5c-570e-ad23-4fbc-dc4aad96aa74/java.io.tmpdir/jetty-localhost-33597-hadoop-hdfs-3_4_1-tests_jar-_-any-15697783914951312428/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-07T13:24:46,691 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@542ee468{HTTP/1.1, (http/1.1)}{localhost:33597} 2024-12-07T13:24:46,691 INFO [Time-limited test {}] server.Server(415): Started @104532ms 2024-12-07T13:24:46,703 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-07T13:24:46,989 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-07T13:24:46,993 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-07T13:24:46,994 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-07T13:24:46,994 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-07T13:24:46,994 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-07T13:24:46,995 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@61783b0f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f061fd5c-570e-ad23-4fbc-dc4aad96aa74/hadoop.log.dir/,AVAILABLE} 2024-12-07T13:24:46,995 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7b58749b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-07T13:24:47,085 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4595827f{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f061fd5c-570e-ad23-4fbc-dc4aad96aa74/java.io.tmpdir/jetty-localhost-41085-hadoop-hdfs-3_4_1-tests_jar-_-any-11151044702442285899/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-07T13:24:47,085 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3b01355c{HTTP/1.1, (http/1.1)}{localhost:41085} 2024-12-07T13:24:47,085 INFO [Time-limited test {}] server.Server(415): Started @104926ms 2024-12-07T13:24:47,087 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-07T13:24:47,114 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-07T13:24:47,119 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-07T13:24:47,120 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-07T13:24:47,120 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-07T13:24:47,120 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-07T13:24:47,121 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6082dc4f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f061fd5c-570e-ad23-4fbc-dc4aad96aa74/hadoop.log.dir/,AVAILABLE} 2024-12-07T13:24:47,121 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6a742c1f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-07T13:24:47,212 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@da5059a{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f061fd5c-570e-ad23-4fbc-dc4aad96aa74/java.io.tmpdir/jetty-localhost-45701-hadoop-hdfs-3_4_1-tests_jar-_-any-14614343552188961624/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-07T13:24:47,212 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2220be00{HTTP/1.1, (http/1.1)}{localhost:45701} 2024-12-07T13:24:47,212 INFO [Time-limited test {}] server.Server(415): Started @105053ms 2024-12-07T13:24:47,214 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
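The property rewrites and Jetty servers above are the mini HDFS cluster coming up under HBaseTestingUtil: every dfs.*, nfs.dump.dir and java.io.tmpdir path is redirected under the per-test data directory, then a NameNode web app and two DataNode web apps are started on localhost. A minimal sketch of the kind of test bootstrap that produces this logging, assuming the hbase-server test utility named in the log (HBaseTestingUtil; the method calls shown here are the usual test-utility entry points, not taken from this run), looks like:

    import org.apache.hadoop.hbase.HBaseTestingUtil;

    public class MiniDfsBootstrapSketch {
      public static void main(String[] args) throws Exception {
        HBaseTestingUtil util = new HBaseTestingUtil();
        // Redirects dfs.journalnode.edits.dir, nfs.dump.dir, java.io.tmpdir, ... into the
        // per-test directory (the "Setting ... in system properties and HBase conf" lines),
        // then starts a NameNode plus the requested number of DataNodes, each with its own
        // Jetty web UI as logged above.
        util.startMiniDFSCluster(2);
        try {
          // test logic against util.getDFSCluster() / util.getTestFileSystem() would go here
        } finally {
          util.shutdownMiniDFSCluster();
        }
      }
    }
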
2024-12-07T13:24:48,427 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-07T13:24:48,428 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-12-07T13:24:48,431 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-12-07T13:24:48,433 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling 2024-12-07T13:24:48,791 WARN [Thread-451 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f061fd5c-570e-ad23-4fbc-dc4aad96aa74/cluster_2a49fc76-c495-b51f-762a-43cb5e526e92/data/data1/current/BP-974771455-172.17.0.3-1733577886218/current, will proceed with Du for space computation calculation, 2024-12-07T13:24:48,791 WARN [Thread-452 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f061fd5c-570e-ad23-4fbc-dc4aad96aa74/cluster_2a49fc76-c495-b51f-762a-43cb5e526e92/data/data2/current/BP-974771455-172.17.0.3-1733577886218/current, will proceed with Du for space computation calculation, 2024-12-07T13:24:48,810 WARN [Thread-415 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-07T13:24:48,813 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xe6adc2d452cb0ce2 with lease ID 0x994e4f420d100fea: Processing first storage report for DS-eada3eb2-7db9-418c-b601-a77ab34681c9 from datanode DatanodeRegistration(127.0.0.1:41385, datanodeUuid=334e508b-26a7-407b-a216-ed4b22647e15, infoPort=37589, infoSecurePort=0, ipcPort=35653, storageInfo=lv=-57;cid=testClusterID;nsid=376735826;c=1733577886218) 2024-12-07T13:24:48,813 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xe6adc2d452cb0ce2 with lease ID 0x994e4f420d100fea: from storage DS-eada3eb2-7db9-418c-b601-a77ab34681c9 node DatanodeRegistration(127.0.0.1:41385, datanodeUuid=334e508b-26a7-407b-a216-ed4b22647e15, infoPort=37589, infoSecurePort=0, ipcPort=35653, storageInfo=lv=-57;cid=testClusterID;nsid=376735826;c=1733577886218), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-07T13:24:48,813 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xe6adc2d452cb0ce2 with lease ID 0x994e4f420d100fea: Processing first storage report for DS-40794e76-8ee7-426d-a4d9-0e06fa456184 from datanode DatanodeRegistration(127.0.0.1:41385, datanodeUuid=334e508b-26a7-407b-a216-ed4b22647e15, infoPort=37589, infoSecurePort=0, ipcPort=35653, storageInfo=lv=-57;cid=testClusterID;nsid=376735826;c=1733577886218) 2024-12-07T13:24:48,813 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xe6adc2d452cb0ce2 with lease ID 0x994e4f420d100fea: from storage DS-40794e76-8ee7-426d-a4d9-0e06fa456184 node DatanodeRegistration(127.0.0.1:41385, datanodeUuid=334e508b-26a7-407b-a216-ed4b22647e15, infoPort=37589, infoSecurePort=0, ipcPort=35653, storageInfo=lv=-57;cid=testClusterID;nsid=376735826;c=1733577886218), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-07T13:24:48,910 WARN [Thread-463 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f061fd5c-570e-ad23-4fbc-dc4aad96aa74/cluster_2a49fc76-c495-b51f-762a-43cb5e526e92/data/data4/current/BP-974771455-172.17.0.3-1733577886218/current, will proceed with Du for space computation calculation, 2024-12-07T13:24:48,911 WARN [Thread-462 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f061fd5c-570e-ad23-4fbc-dc4aad96aa74/cluster_2a49fc76-c495-b51f-762a-43cb5e526e92/data/data3/current/BP-974771455-172.17.0.3-1733577886218/current, will proceed with Du for space computation calculation, 2024-12-07T13:24:48,934 WARN [Thread-438 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-07T13:24:48,937 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x9056705d0630fdbc with lease ID 0x994e4f420d100feb: Processing first storage report for DS-7311d4b9-5f39-433e-87c5-7b412731f2fe from datanode DatanodeRegistration(127.0.0.1:38195, datanodeUuid=acd5bcf6-67bd-4109-a55d-168ee323fbaa, infoPort=41069, infoSecurePort=0, ipcPort=46853, storageInfo=lv=-57;cid=testClusterID;nsid=376735826;c=1733577886218) 2024-12-07T13:24:48,937 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x9056705d0630fdbc with lease ID 0x994e4f420d100feb: from storage DS-7311d4b9-5f39-433e-87c5-7b412731f2fe node DatanodeRegistration(127.0.0.1:38195, datanodeUuid=acd5bcf6-67bd-4109-a55d-168ee323fbaa, infoPort=41069, infoSecurePort=0, ipcPort=46853, storageInfo=lv=-57;cid=testClusterID;nsid=376735826;c=1733577886218), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-07T13:24:48,937 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x9056705d0630fdbc with lease ID 0x994e4f420d100feb: Processing first storage report for DS-91ce7213-d9ab-42b0-9d0d-8749148cd42b from datanode DatanodeRegistration(127.0.0.1:38195, datanodeUuid=acd5bcf6-67bd-4109-a55d-168ee323fbaa, infoPort=41069, infoSecurePort=0, ipcPort=46853, storageInfo=lv=-57;cid=testClusterID;nsid=376735826;c=1733577886218) 2024-12-07T13:24:48,937 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x9056705d0630fdbc with lease ID 0x994e4f420d100feb: from storage DS-91ce7213-d9ab-42b0-9d0d-8749148cd42b node DatanodeRegistration(127.0.0.1:38195, datanodeUuid=acd5bcf6-67bd-4109-a55d-168ee323fbaa, infoPort=41069, infoSecurePort=0, ipcPort=46853, storageInfo=lv=-57;cid=testClusterID;nsid=376735826;c=1733577886218), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-07T13:24:48,960 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f061fd5c-570e-ad23-4fbc-dc4aad96aa74 2024-12-07T13:24:48,962 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f061fd5c-570e-ad23-4fbc-dc4aad96aa74/cluster_2a49fc76-c495-b51f-762a-43cb5e526e92/zookeeper_0, clientPort=57200, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f061fd5c-570e-ad23-4fbc-dc4aad96aa74/cluster_2a49fc76-c495-b51f-762a-43cb5e526e92/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f061fd5c-570e-ad23-4fbc-dc4aad96aa74/cluster_2a49fc76-c495-b51f-762a-43cb5e526e92/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-07T13:24:48,963 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=57200 2024-12-07T13:24:48,963 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T13:24:48,965 INFO 
[Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T13:24:48,977 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38195 is added to blk_1073741825_1001 (size=7) 2024-12-07T13:24:48,977 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41385 is added to blk_1073741825_1001 (size=7) 2024-12-07T13:24:48,979 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:42747/user/jenkins/test-data/fef1c443-84de-e31e-1cc2-7d766696f91f with version=8 2024-12-07T13:24:48,979 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:33723/user/jenkins/test-data/3eb2eb4e-7df3-dd45-b018-332cf885bdba/hbase-staging 2024-12-07T13:24:48,982 INFO [Time-limited test {}] client.ConnectionUtils(128): master/c7c455b68129:0 server-side Connection retries=45 2024-12-07T13:24:48,982 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-07T13:24:48,982 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-07T13:24:48,982 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-07T13:24:48,982 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-07T13:24:48,982 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-07T13:24:48,982 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-12-07T13:24:48,982 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-07T13:24:48,983 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:43255 2024-12-07T13:24:48,985 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:43255 connecting to ZooKeeper ensemble=127.0.0.1:57200 2024-12-07T13:24:49,049 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:432550x0, quorum=127.0.0.1:57200, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-07T13:24:49,050 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:43255-0x10000735b880000 connected 2024-12-07T13:24:49,155 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T13:24:49,160 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call 
to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T13:24:49,164 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:43255-0x10000735b880000, quorum=127.0.0.1:57200, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-07T13:24:49,165 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:42747/user/jenkins/test-data/fef1c443-84de-e31e-1cc2-7d766696f91f, hbase.cluster.distributed=false 2024-12-07T13:24:49,167 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:43255-0x10000735b880000, quorum=127.0.0.1:57200, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-07T13:24:49,168 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=43255 2024-12-07T13:24:49,168 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=43255 2024-12-07T13:24:49,172 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=43255 2024-12-07T13:24:49,177 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=43255 2024-12-07T13:24:49,177 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=43255 2024-12-07T13:24:49,191 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/c7c455b68129:0 server-side Connection retries=45 2024-12-07T13:24:49,192 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-07T13:24:49,192 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-07T13:24:49,192 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-07T13:24:49,192 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-07T13:24:49,192 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-07T13:24:49,192 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-07T13:24:49,192 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-07T13:24:49,193 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:35435 2024-12-07T13:24:49,194 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:35435 connecting to ZooKeeper ensemble=127.0.0.1:57200 2024-12-07T13:24:49,194 INFO [Time-limited test {}] 
fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T13:24:49,196 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T13:24:49,207 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:354350x0, quorum=127.0.0.1:57200, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-07T13:24:49,207 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:354350x0, quorum=127.0.0.1:57200, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-07T13:24:49,207 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:35435-0x10000735b880001 connected 2024-12-07T13:24:49,207 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-07T13:24:49,208 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-07T13:24:49,209 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:35435-0x10000735b880001, quorum=127.0.0.1:57200, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-07T13:24:49,210 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:35435-0x10000735b880001, quorum=127.0.0.1:57200, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-07T13:24:49,212 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=35435 2024-12-07T13:24:49,213 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=35435 2024-12-07T13:24:49,214 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=35435 2024-12-07T13:24:49,214 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=35435 2024-12-07T13:24:49,214 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=35435 2024-12-07T13:24:49,225 DEBUG [M:0;c7c455b68129:43255 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;c7c455b68129:43255 2024-12-07T13:24:49,226 INFO [master/c7c455b68129:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/c7c455b68129,43255,1733577888981 2024-12-07T13:24:49,238 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35435-0x10000735b880001, quorum=127.0.0.1:57200, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-07T13:24:49,238 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43255-0x10000735b880000, quorum=127.0.0.1:57200, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-07T13:24:49,239 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:43255-0x10000735b880000, quorum=127.0.0.1:57200, baseZNode=/hbase Set 
watcher on existing znode=/hbase/backup-masters/c7c455b68129,43255,1733577888981 2024-12-07T13:24:49,249 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43255-0x10000735b880000, quorum=127.0.0.1:57200, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T13:24:49,249 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35435-0x10000735b880001, quorum=127.0.0.1:57200, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-07T13:24:49,249 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35435-0x10000735b880001, quorum=127.0.0.1:57200, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T13:24:49,249 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:43255-0x10000735b880000, quorum=127.0.0.1:57200, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-07T13:24:49,250 INFO [master/c7c455b68129:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/c7c455b68129,43255,1733577888981 from backup master directory 2024-12-07T13:24:49,259 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35435-0x10000735b880001, quorum=127.0.0.1:57200, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-07T13:24:49,259 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43255-0x10000735b880000, quorum=127.0.0.1:57200, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/c7c455b68129,43255,1733577888981 2024-12-07T13:24:49,259 WARN [master/c7c455b68129:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
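The ZKWatcher/ZKUtil lines above show the master and region server keeping watches on znodes such as /hbase/master and /hbase/backup-masters and reacting to NodeCreated, NodeDeleted and NodeChildrenChanged events. The same mechanism expressed with the plain ZooKeeper client API is sketched below (simplified; HBase wraps this in ZKWatcher/ZKUtil rather than using the raw client directly, and the ensemble address is simply copied from the log):

    import org.apache.zookeeper.ZooKeeper;

    public class ZkWatchSketch {
      public static void main(String[] args) throws Exception {
        // Connect to the test ensemble from the log and watch a znode that may not exist yet,
        // mirroring "Set watcher on znode that does not yet exist, /hbase/master".
        ZooKeeper zk = new ZooKeeper("127.0.0.1:57200", 30000,
            event -> System.out.println("event " + event.getType() + " on " + event.getPath()));
        zk.exists("/hbase/master", true);              // one-shot watch; it must be re-set after it fires
        zk.getChildren("/hbase/backup-masters", true); // fires NodeChildrenChanged, as logged above
        Thread.sleep(5_000);                           // give the watcher a chance to report events
        zk.close();
      }
    }
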
2024-12-07T13:24:49,259 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43255-0x10000735b880000, quorum=127.0.0.1:57200, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-07T13:24:49,259 INFO [master/c7c455b68129:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=c7c455b68129,43255,1733577888981 2024-12-07T13:24:49,265 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:42747/user/jenkins/test-data/fef1c443-84de-e31e-1cc2-7d766696f91f/hbase.id] with ID: 73197e75-2c9d-43ee-b718-831f193c1546 2024-12-07T13:24:49,265 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:42747/user/jenkins/test-data/fef1c443-84de-e31e-1cc2-7d766696f91f/.tmp/hbase.id 2024-12-07T13:24:49,273 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38195 is added to blk_1073741826_1002 (size=42) 2024-12-07T13:24:49,273 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41385 is added to blk_1073741826_1002 (size=42) 2024-12-07T13:24:49,274 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:42747/user/jenkins/test-data/fef1c443-84de-e31e-1cc2-7d766696f91f/.tmp/hbase.id]:[hdfs://localhost:42747/user/jenkins/test-data/fef1c443-84de-e31e-1cc2-7d766696f91f/hbase.id] 2024-12-07T13:24:49,288 INFO [master/c7c455b68129:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T13:24:49,288 INFO [master/c7c455b68129:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-12-07T13:24:49,290 INFO [master/c7c455b68129:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 
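The cluster ID sequence above (write the ID to .tmp/hbase.id, then move it onto hbase.id) is the usual write-then-rename pattern, so readers never observe a half-written file. In terms of the plain Hadoop FileSystem API it looks roughly like the following sketch (illustrative: FSUtils serializes the ID with its own wrapper rather than as raw text, and the paths and ID value are simply copied from the log):

    import java.nio.charset.StandardCharsets;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class ClusterIdWriteSketch {
      public static void main(String[] args) throws Exception {
        Path root = new Path("hdfs://localhost:42747/user/jenkins/test-data/fef1c443-84de-e31e-1cc2-7d766696f91f");
        FileSystem fs = root.getFileSystem(new Configuration());
        Path tmp = new Path(root, ".tmp/hbase.id");
        Path target = new Path(root, "hbase.id");
        // 1. Write the ID to a temporary file first.
        try (FSDataOutputStream out = fs.create(tmp, true)) {
          out.write("73197e75-2c9d-43ee-b718-831f193c1546".getBytes(StandardCharsets.UTF_8));
        }
        // 2. Rename it onto the final location; only then does the ID become visible to readers.
        if (!fs.rename(tmp, target)) {
          throw new java.io.IOException("rename failed: " + tmp + " -> " + target);
        }
      }
    }
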
2024-12-07T13:24:49,301 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35435-0x10000735b880001, quorum=127.0.0.1:57200, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T13:24:49,301 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43255-0x10000735b880000, quorum=127.0.0.1:57200, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T13:24:49,310 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41385 is added to blk_1073741827_1003 (size=196) 2024-12-07T13:24:49,311 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38195 is added to blk_1073741827_1003 (size=196) 2024-12-07T13:24:49,312 INFO [master/c7c455b68129:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-07T13:24:49,313 INFO [master/c7c455b68129:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-07T13:24:49,313 INFO [master/c7c455b68129:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-07T13:24:49,320 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38195 is added to blk_1073741828_1004 (size=1189) 2024-12-07T13:24:49,320 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41385 is added to blk_1073741828_1004 (size=1189) 2024-12-07T13:24:49,321 INFO [master/c7c455b68129:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:42747/user/jenkins/test-data/fef1c443-84de-e31e-1cc2-7d766696f91f/MasterData/data/master/store 2024-12-07T13:24:49,328 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38195 is added to blk_1073741829_1005 (size=34) 2024-12-07T13:24:49,328 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41385 is added to blk_1073741829_1005 (size=34) 2024-12-07T13:24:49,329 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T13:24:49,330 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-07T13:24:49,330 INFO [master/c7c455b68129:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-07T13:24:49,330 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-07T13:24:49,330 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-07T13:24:49,330 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-07T13:24:49,330 INFO [master/c7c455b68129:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
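The 'master:store' descriptor above (families info, proc, rs and state, with ROW_INDEX_V1 encoding and an 8 KB block size on info, single-version ROW-bloom families elsewhere) is built internally for the master's local region. Expressed with the public client builders, an equivalent descriptor would look roughly like the sketch below (illustrative only; the attribute values are copied from the log, and the master's own construction path may differ):

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MasterStoreDescriptorSketch {
      public static void main(String[] args) {
        TableDescriptor desc = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("master", "store"))
            // 'info': 3 versions, in-memory, ROW_INDEX_V1 encoding, ROWCOL bloom, 8 KB blocks
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
                .setMaxVersions(3)
                .setInMemory(true)
                .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
                .setBloomFilterType(BloomType.ROWCOL)
                .setBlocksize(8 * 1024)
                .build())
            // 'proc', 'rs', 'state': 1 version, ROW bloom, default 64 KB blocks
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("proc"))
                .setMaxVersions(1).setBloomFilterType(BloomType.ROW).build())
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("rs"))
                .setMaxVersions(1).setBloomFilterType(BloomType.ROW).build())
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("state"))
                .setMaxVersions(1).setBloomFilterType(BloomType.ROW).build())
            .build();
        System.out.println(desc);
      }
    }
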
2024-12-07T13:24:49,330 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733577889329Disabling compacts and flushes for region at 1733577889329Disabling writes for close at 1733577889330 (+1 ms)Writing region close event to WAL at 1733577889330Closed at 1733577889330 2024-12-07T13:24:49,331 WARN [master/c7c455b68129:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:42747/user/jenkins/test-data/fef1c443-84de-e31e-1cc2-7d766696f91f/MasterData/data/master/store/.initializing 2024-12-07T13:24:49,331 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:42747/user/jenkins/test-data/fef1c443-84de-e31e-1cc2-7d766696f91f/MasterData/WALs/c7c455b68129,43255,1733577888981 2024-12-07T13:24:49,335 INFO [master/c7c455b68129:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=c7c455b68129%2C43255%2C1733577888981, suffix=, logDir=hdfs://localhost:42747/user/jenkins/test-data/fef1c443-84de-e31e-1cc2-7d766696f91f/MasterData/WALs/c7c455b68129,43255,1733577888981, archiveDir=hdfs://localhost:42747/user/jenkins/test-data/fef1c443-84de-e31e-1cc2-7d766696f91f/MasterData/oldWALs, maxLogs=10 2024-12-07T13:24:49,335 INFO [master/c7c455b68129:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor c7c455b68129%2C43255%2C1733577888981.1733577889335 2024-12-07T13:24:49,342 INFO [master/c7c455b68129:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/fef1c443-84de-e31e-1cc2-7d766696f91f/MasterData/WALs/c7c455b68129,43255,1733577888981/c7c455b68129%2C43255%2C1733577888981.1733577889335 2024-12-07T13:24:49,343 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37589:37589),(127.0.0.1/127.0.0.1:41069:41069)] 2024-12-07T13:24:49,344 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-07T13:24:49,344 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T13:24:49,344 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-07T13:24:49,344 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-07T13:24:49,346 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-07T13:24:49,348 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-07T13:24:49,348 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T13:24:49,348 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T13:24:49,349 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-07T13:24:49,350 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-07T13:24:49,350 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T13:24:49,351 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T13:24:49,351 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-07T13:24:49,353 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-07T13:24:49,353 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T13:24:49,354 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T13:24:49,354 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-07T13:24:49,356 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-07T13:24:49,356 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T13:24:49,356 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T13:24:49,356 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-07T13:24:49,357 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42747/user/jenkins/test-data/fef1c443-84de-e31e-1cc2-7d766696f91f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-07T13:24:49,358 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42747/user/jenkins/test-data/fef1c443-84de-e31e-1cc2-7d766696f91f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-07T13:24:49,359 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-07T13:24:49,359 DEBUG [master/c7c455b68129:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-07T13:24:49,360 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-07T13:24:49,361 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-07T13:24:49,364 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42747/user/jenkins/test-data/fef1c443-84de-e31e-1cc2-7d766696f91f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-07T13:24:49,364 INFO [master/c7c455b68129:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=784511, jitterRate=-0.002443477511405945}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-07T13:24:49,365 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733577889344Initializing all the Stores at 1733577889345 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733577889345Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733577889346 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733577889346Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733577889346Cleaning up temporary data from old regions at 1733577889359 (+13 ms)Region opened successfully at 1733577889365 (+6 ms) 2024-12-07T13:24:49,365 INFO [master/c7c455b68129:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-07T13:24:49,369 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@c4407c6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=c7c455b68129/172.17.0.3:0 2024-12-07T13:24:49,370 INFO [master/c7c455b68129:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-12-07T13:24:49,370 INFO [master/c7c455b68129:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-07T13:24:49,370 INFO [master/c7c455b68129:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-07T13:24:49,371 INFO [master/c7c455b68129:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-07T13:24:49,371 INFO [master/c7c455b68129:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-12-07T13:24:49,372 INFO [master/c7c455b68129:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-12-07T13:24:49,372 INFO [master/c7c455b68129:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-07T13:24:49,374 INFO [master/c7c455b68129:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-07T13:24:49,375 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:43255-0x10000735b880000, quorum=127.0.0.1:57200, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-07T13:24:49,385 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-07T13:24:49,386 INFO [master/c7c455b68129:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-07T13:24:49,387 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:43255-0x10000735b880000, quorum=127.0.0.1:57200, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-07T13:24:49,396 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-07T13:24:49,396 INFO [master/c7c455b68129:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-07T13:24:49,398 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:43255-0x10000735b880000, quorum=127.0.0.1:57200, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-07T13:24:49,407 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-07T13:24:49,409 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:43255-0x10000735b880000, quorum=127.0.0.1:57200, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-07T13:24:49,417 DEBUG 
[master/c7c455b68129:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-07T13:24:49,422 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:43255-0x10000735b880000, quorum=127.0.0.1:57200, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-07T13:24:49,434 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-07T13:24:49,444 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43255-0x10000735b880000, quorum=127.0.0.1:57200, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-07T13:24:49,444 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35435-0x10000735b880001, quorum=127.0.0.1:57200, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-07T13:24:49,444 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43255-0x10000735b880000, quorum=127.0.0.1:57200, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T13:24:49,445 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35435-0x10000735b880001, quorum=127.0.0.1:57200, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T13:24:49,446 INFO [master/c7c455b68129:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=c7c455b68129,43255,1733577888981, sessionid=0x10000735b880000, setting cluster-up flag (Was=false) 2024-12-07T13:24:49,470 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35435-0x10000735b880001, quorum=127.0.0.1:57200, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T13:24:49,470 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43255-0x10000735b880000, quorum=127.0.0.1:57200, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T13:24:49,502 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-07T13:24:49,507 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=c7c455b68129,43255,1733577888981 2024-12-07T13:24:49,533 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43255-0x10000735b880000, quorum=127.0.0.1:57200, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T13:24:49,533 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35435-0x10000735b880001, quorum=127.0.0.1:57200, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T13:24:49,565 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-07T13:24:49,570 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=c7c455b68129,43255,1733577888981 2024-12-07T13:24:49,574 INFO [master/c7c455b68129:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:42747/user/jenkins/test-data/fef1c443-84de-e31e-1cc2-7d766696f91f/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-07T13:24:49,577 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-07T13:24:49,578 INFO [master/c7c455b68129:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-07T13:24:49,578 INFO [master/c7c455b68129:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-12-07T13:24:49,578 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: c7c455b68129,43255,1733577888981 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-07T13:24:49,581 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/c7c455b68129:0, corePoolSize=5, maxPoolSize=5 2024-12-07T13:24:49,581 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/c7c455b68129:0, corePoolSize=5, maxPoolSize=5 2024-12-07T13:24:49,581 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/c7c455b68129:0, corePoolSize=5, maxPoolSize=5 2024-12-07T13:24:49,581 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/c7c455b68129:0, corePoolSize=5, maxPoolSize=5 2024-12-07T13:24:49,581 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/c7c455b68129:0, corePoolSize=10, maxPoolSize=10 2024-12-07T13:24:49,581 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/c7c455b68129:0, corePoolSize=1, maxPoolSize=1 2024-12-07T13:24:49,581 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/c7c455b68129:0, corePoolSize=2, maxPoolSize=2 2024-12-07T13:24:49,581 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/c7c455b68129:0, corePoolSize=1, 
maxPoolSize=1 2024-12-07T13:24:49,583 INFO [master/c7c455b68129:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733577919583 2024-12-07T13:24:49,583 INFO [master/c7c455b68129:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-07T13:24:49,583 INFO [master/c7c455b68129:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-07T13:24:49,583 INFO [master/c7c455b68129:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-07T13:24:49,583 INFO [master/c7c455b68129:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-07T13:24:49,583 INFO [master/c7c455b68129:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-07T13:24:49,583 INFO [master/c7c455b68129:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-07T13:24:49,584 INFO [master/c7c455b68129:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-07T13:24:49,584 INFO [master/c7c455b68129:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-07T13:24:49,584 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-07T13:24:49,584 INFO [master/c7c455b68129:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-07T13:24:49,584 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-07T13:24:49,585 INFO [master/c7c455b68129:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-07T13:24:49,585 INFO [master/c7c455b68129:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-07T13:24:49,585 INFO [master/c7c455b68129:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-07T13:24:49,585 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/c7c455b68129:0:becomeActiveMaster-HFileCleaner.large.0-1733577889585,5,FailOnTimeoutGroup] 2024-12-07T13:24:49,586 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/c7c455b68129:0:becomeActiveMaster-HFileCleaner.small.0-1733577889585,5,FailOnTimeoutGroup] 2024-12-07T13:24:49,586 INFO [master/c7c455b68129:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-07T13:24:49,586 INFO [master/c7c455b68129:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. 
Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-07T13:24:49,586 INFO [master/c7c455b68129:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-07T13:24:49,586 INFO [master/c7c455b68129:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-12-07T13:24:49,586 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T13:24:49,587 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-07T13:24:49,597 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38195 is added to blk_1073741831_1007 (size=1321) 2024-12-07T13:24:49,597 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41385 is added to blk_1073741831_1007 (size=1321) 2024-12-07T13:24:49,598 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:42747/user/jenkins/test-data/fef1c443-84de-e31e-1cc2-7d766696f91f/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-07T13:24:49,598 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:42747/user/jenkins/test-data/fef1c443-84de-e31e-1cc2-7d766696f91f 2024-12-07T13:24:49,606 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38195 is added to blk_1073741832_1008 (size=32) 2024-12-07T13:24:49,607 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41385 is added to blk_1073741832_1008 (size=32) 2024-12-07T13:24:49,607 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T13:24:49,609 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-07T13:24:49,610 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-07T13:24:49,610 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T13:24:49,611 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T13:24:49,611 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, 
prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-07T13:24:49,612 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-07T13:24:49,612 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T13:24:49,613 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T13:24:49,613 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-07T13:24:49,614 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-07T13:24:49,614 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T13:24:49,615 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T13:24:49,615 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-07T13:24:49,617 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to 
compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-07T13:24:49,617 INFO [RS:0;c7c455b68129:35435 {}] regionserver.HRegionServer(746): ClusterId : 73197e75-2c9d-43ee-b718-831f193c1546 2024-12-07T13:24:49,617 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T13:24:49,617 DEBUG [RS:0;c7c455b68129:35435 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-07T13:24:49,617 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T13:24:49,617 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-07T13:24:49,618 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42747/user/jenkins/test-data/fef1c443-84de-e31e-1cc2-7d766696f91f/data/hbase/meta/1588230740 2024-12-07T13:24:49,619 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42747/user/jenkins/test-data/fef1c443-84de-e31e-1cc2-7d766696f91f/data/hbase/meta/1588230740 2024-12-07T13:24:49,620 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-07T13:24:49,620 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-07T13:24:49,621 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
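The table descriptor echoed in the records above lists each column family of hbase:meta with its encoding, bloom filter, block size and version settings. As a hedged illustration of how a comparable descriptor could be built with the public client API (this is not how HBase constructs meta internally; the table name "demo:meta_like" is made up, and only the 'info'-style family is shown):

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class MetaLikeDescriptorSketch {
    public static void main(String[] args) {
        // Same knobs the 'info' family shows in the log above:
        // ROW_INDEX_V1 encoding, ROWCOL bloom filter, in-memory, 8 KB blocks, 3 versions.
        ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
                .setMaxVersions(3)
                .setInMemory(true)
                .setBlocksize(8 * 1024)
                .setBloomFilterType(BloomType.ROWCOL)
                .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
                .build();

        // "demo:meta_like" is an illustrative name; user code cannot redefine hbase:meta itself.
        TableDescriptor table = TableDescriptorBuilder.newBuilder(TableName.valueOf("demo", "meta_like"))
                .setColumnFamily(info)
                .build();

        System.out.println(table);
    }
}
```

Printing the built descriptor yields roughly the same attribute dump that FSTableDescriptors logs above when it writes the .tableinfo file.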
2024-12-07T13:24:49,622 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-07T13:24:49,624 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42747/user/jenkins/test-data/fef1c443-84de-e31e-1cc2-7d766696f91f/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-07T13:24:49,625 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=698542, jitterRate=-0.11175794899463654}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-07T13:24:49,626 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733577889607Initializing all the Stores at 1733577889608 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733577889608Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733577889609 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733577889609Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733577889609Cleaning up temporary data from old regions at 1733577889620 (+11 ms)Region opened successfully at 1733577889626 (+6 ms) 2024-12-07T13:24:49,626 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-07T13:24:49,626 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-07T13:24:49,626 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-07T13:24:49,626 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-07T13:24:49,626 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-07T13:24:49,626 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-07T13:24:49,626 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733577889626Disabling compacts and flushes for region at 1733577889626Disabling writes for close at 1733577889626Writing 
region close event to WAL at 1733577889626Closed at 1733577889626 2024-12-07T13:24:49,628 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-07T13:24:49,628 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-07T13:24:49,628 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-07T13:24:49,629 DEBUG [RS:0;c7c455b68129:35435 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-07T13:24:49,629 DEBUG [RS:0;c7c455b68129:35435 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-07T13:24:49,630 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-07T13:24:49,631 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-07T13:24:49,639 DEBUG [RS:0;c7c455b68129:35435 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-07T13:24:49,640 DEBUG [RS:0;c7c455b68129:35435 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@f0a0b85, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=c7c455b68129/172.17.0.3:0 2024-12-07T13:24:49,651 DEBUG [RS:0;c7c455b68129:35435 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;c7c455b68129:35435 2024-12-07T13:24:49,651 INFO [RS:0;c7c455b68129:35435 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-07T13:24:49,651 INFO [RS:0;c7c455b68129:35435 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-07T13:24:49,651 DEBUG [RS:0;c7c455b68129:35435 {}] regionserver.HRegionServer(832): About to register with Master. 
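Two configuration keys are called out by name in the records above: the HMaster notes that hbase.regions.recovery.store.file.ref.count must be > 0 to enable reopening regions with very high store file reference counts, and FlushLargeStoresPolicy falls back to memstore-flush-size divided by the number of families because hbase.hregion.percolumnfamilyflush.size.lower.bound is not set in the hbase:meta descriptor. A minimal sketch of those keys on a Hadoop Configuration follows; the chosen values are illustrative only, and whether the flush lower bound is honored from site configuration or only as a table attribute depends on the HBase version:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class RecoveryAndFlushConfigSketch {
    public static void main(String[] args) {
        // Defaults from hbase-default.xml / hbase-site.xml on the classpath.
        Configuration conf = HBaseConfiguration.create();

        // The HMaster message above says a value > 0 is required to enable the
        // "reopen regions with very high storeFileRefCount" chore; 256 is arbitrary.
        conf.setInt("hbase.regions.recovery.store.file.ref.count", 256);

        // FlushLargeStoresPolicy's fallback (memstore flush size / #families) applies
        // only while this lower bound is unset; 16 MB here is purely illustrative.
        conf.setLong("hbase.hregion.percolumnfamilyflush.size.lower.bound", 16L * 1024 * 1024);

        System.out.println(conf.getInt("hbase.regions.recovery.store.file.ref.count", 0));
    }
}
```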
2024-12-07T13:24:49,652 INFO [RS:0;c7c455b68129:35435 {}] regionserver.HRegionServer(2659): reportForDuty to master=c7c455b68129,43255,1733577888981 with port=35435, startcode=1733577889191 2024-12-07T13:24:49,652 DEBUG [RS:0;c7c455b68129:35435 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-07T13:24:49,655 INFO [HMaster-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:50221, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=RegionServerStatusService 2024-12-07T13:24:49,655 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43255 {}] master.ServerManager(363): Checking decommissioned status of RegionServer c7c455b68129,35435,1733577889191 2024-12-07T13:24:49,655 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43255 {}] master.ServerManager(517): Registering regionserver=c7c455b68129,35435,1733577889191 2024-12-07T13:24:49,657 DEBUG [RS:0;c7c455b68129:35435 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:42747/user/jenkins/test-data/fef1c443-84de-e31e-1cc2-7d766696f91f 2024-12-07T13:24:49,657 DEBUG [RS:0;c7c455b68129:35435 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:42747 2024-12-07T13:24:49,658 DEBUG [RS:0;c7c455b68129:35435 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-07T13:24:49,670 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43255-0x10000735b880000, quorum=127.0.0.1:57200, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-07T13:24:49,670 DEBUG [RS:0;c7c455b68129:35435 {}] zookeeper.ZKUtil(111): regionserver:35435-0x10000735b880001, quorum=127.0.0.1:57200, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/c7c455b68129,35435,1733577889191 2024-12-07T13:24:49,670 WARN [RS:0;c7c455b68129:35435 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-07T13:24:49,670 INFO [RS:0;c7c455b68129:35435 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-07T13:24:49,670 DEBUG [RS:0;c7c455b68129:35435 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:42747/user/jenkins/test-data/fef1c443-84de-e31e-1cc2-7d766696f91f/WALs/c7c455b68129,35435,1733577889191 2024-12-07T13:24:49,670 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [c7c455b68129,35435,1733577889191] 2024-12-07T13:24:49,675 INFO [RS:0;c7c455b68129:35435 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-07T13:24:49,678 INFO [RS:0;c7c455b68129:35435 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-07T13:24:49,679 INFO [RS:0;c7c455b68129:35435 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-07T13:24:49,679 INFO [RS:0;c7c455b68129:35435 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
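The records above show the regionserver creating an ephemeral znode under /hbase/rs and the master's RegionServerTracker reacting to the resulting NodeChildrenChanged event. A small stand-alone ZooKeeper sketch of the same pattern, reusing the quorum address and znode path printed in the log (the 30 s session timeout is arbitrary; this is an external observer, not HBase's own ZKWatcher):

```java
import java.util.List;
import org.apache.zookeeper.ZooKeeper;

public class RegionServerZNodeSketch {
    public static void main(String[] args) throws Exception {
        // Quorum address taken from the log lines above.
        ZooKeeper zk = new ZooKeeper("127.0.0.1:57200", 30000,
                event -> System.out.println("type=" + event.getType()
                        + ", state=" + event.getState() + ", path=" + event.getPath()));

        // Each live regionserver registers an ephemeral child under /hbase/rs;
        // passing true re-arms a watch so children changes arrive like the events logged above.
        List<String> regionServers = zk.getChildren("/hbase/rs", true);
        System.out.println("live regionservers: " + regionServers);

        zk.close();
    }
}
```

Because the regionserver znode is ephemeral, it disappears when the server's session expires, which is exactly what lets the tracker detect crashed servers.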
2024-12-07T13:24:49,679 INFO [RS:0;c7c455b68129:35435 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-07T13:24:49,680 INFO [RS:0;c7c455b68129:35435 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-07T13:24:49,680 INFO [RS:0;c7c455b68129:35435 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-07T13:24:49,681 DEBUG [RS:0;c7c455b68129:35435 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/c7c455b68129:0, corePoolSize=1, maxPoolSize=1 2024-12-07T13:24:49,681 DEBUG [RS:0;c7c455b68129:35435 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/c7c455b68129:0, corePoolSize=1, maxPoolSize=1 2024-12-07T13:24:49,681 DEBUG [RS:0;c7c455b68129:35435 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/c7c455b68129:0, corePoolSize=1, maxPoolSize=1 2024-12-07T13:24:49,681 DEBUG [RS:0;c7c455b68129:35435 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/c7c455b68129:0, corePoolSize=1, maxPoolSize=1 2024-12-07T13:24:49,681 DEBUG [RS:0;c7c455b68129:35435 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/c7c455b68129:0, corePoolSize=1, maxPoolSize=1 2024-12-07T13:24:49,681 DEBUG [RS:0;c7c455b68129:35435 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/c7c455b68129:0, corePoolSize=2, maxPoolSize=2 2024-12-07T13:24:49,681 DEBUG [RS:0;c7c455b68129:35435 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/c7c455b68129:0, corePoolSize=1, maxPoolSize=1 2024-12-07T13:24:49,681 DEBUG [RS:0;c7c455b68129:35435 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/c7c455b68129:0, corePoolSize=1, maxPoolSize=1 2024-12-07T13:24:49,681 DEBUG [RS:0;c7c455b68129:35435 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/c7c455b68129:0, corePoolSize=1, maxPoolSize=1 2024-12-07T13:24:49,681 DEBUG [RS:0;c7c455b68129:35435 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/c7c455b68129:0, corePoolSize=1, maxPoolSize=1 2024-12-07T13:24:49,681 DEBUG [RS:0;c7c455b68129:35435 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/c7c455b68129:0, corePoolSize=1, maxPoolSize=1 2024-12-07T13:24:49,681 DEBUG [RS:0;c7c455b68129:35435 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/c7c455b68129:0, corePoolSize=1, maxPoolSize=1 2024-12-07T13:24:49,681 DEBUG [RS:0;c7c455b68129:35435 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/c7c455b68129:0, corePoolSize=3, maxPoolSize=3 2024-12-07T13:24:49,681 DEBUG [RS:0;c7c455b68129:35435 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/c7c455b68129:0, corePoolSize=3, maxPoolSize=3 2024-12-07T13:24:49,682 INFO [RS:0;c7c455b68129:35435 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
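Each "Starting executor service" record above reports a corePoolSize/maxPoolSize pair for a named operation type. As a plain-JDK illustration of what those two numbers mean (this uses java.util.concurrent directly, not HBase's own ExecutorService wrapper; the keepalive/timeout choices are assumptions for the sketch):

```java
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class ExecutorSizingSketch {
    public static void main(String[] args) {
        // Mirrors the corePoolSize=3, maxPoolSize=3 pair logged for RS_FLUSH_OPERATIONS;
        // equal core and max sizes give a fixed-size pool fed by an unbounded queue.
        ThreadPoolExecutor flushOps = new ThreadPoolExecutor(
                3, 3, 60L, TimeUnit.SECONDS, new LinkedBlockingQueue<>());
        flushOps.allowCoreThreadTimeOut(true); // let idle workers exit (illustrative choice)

        for (int i = 0; i < 5; i++) {
            final int task = i;
            flushOps.execute(() ->
                    System.out.println("task " + task + " on " + Thread.currentThread().getName()));
        }
        flushOps.shutdown();
    }
}
```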
2024-12-07T13:24:49,682 INFO [RS:0;c7c455b68129:35435 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-07T13:24:49,682 INFO [RS:0;c7c455b68129:35435 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-07T13:24:49,682 INFO [RS:0;c7c455b68129:35435 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-07T13:24:49,682 INFO [RS:0;c7c455b68129:35435 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-07T13:24:49,682 INFO [RS:0;c7c455b68129:35435 {}] hbase.ChoreService(168): Chore ScheduledChore name=c7c455b68129,35435,1733577889191-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-07T13:24:49,695 INFO [RS:0;c7c455b68129:35435 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-07T13:24:49,695 INFO [RS:0;c7c455b68129:35435 {}] hbase.ChoreService(168): Chore ScheduledChore name=c7c455b68129,35435,1733577889191-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-07T13:24:49,696 INFO [RS:0;c7c455b68129:35435 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-07T13:24:49,696 INFO [RS:0;c7c455b68129:35435 {}] regionserver.Replication(171): c7c455b68129,35435,1733577889191 started 2024-12-07T13:24:49,707 INFO [RS:0;c7c455b68129:35435 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-07T13:24:49,708 INFO [RS:0;c7c455b68129:35435 {}] regionserver.HRegionServer(1482): Serving as c7c455b68129,35435,1733577889191, RpcServer on c7c455b68129/172.17.0.3:35435, sessionid=0x10000735b880001 2024-12-07T13:24:49,708 DEBUG [RS:0;c7c455b68129:35435 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-07T13:24:49,708 DEBUG [RS:0;c7c455b68129:35435 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager c7c455b68129,35435,1733577889191 2024-12-07T13:24:49,708 DEBUG [RS:0;c7c455b68129:35435 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'c7c455b68129,35435,1733577889191' 2024-12-07T13:24:49,708 DEBUG [RS:0;c7c455b68129:35435 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-07T13:24:49,709 DEBUG [RS:0;c7c455b68129:35435 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-07T13:24:49,709 DEBUG [RS:0;c7c455b68129:35435 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-07T13:24:49,709 DEBUG [RS:0;c7c455b68129:35435 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-07T13:24:49,709 DEBUG [RS:0;c7c455b68129:35435 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager c7c455b68129,35435,1733577889191 2024-12-07T13:24:49,709 DEBUG [RS:0;c7c455b68129:35435 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'c7c455b68129,35435,1733577889191' 2024-12-07T13:24:49,709 DEBUG [RS:0;c7c455b68129:35435 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-07T13:24:49,710 DEBUG 
[RS:0;c7c455b68129:35435 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-07T13:24:49,710 DEBUG [RS:0;c7c455b68129:35435 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-07T13:24:49,710 INFO [RS:0;c7c455b68129:35435 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-07T13:24:49,710 INFO [RS:0;c7c455b68129:35435 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-07T13:24:49,781 WARN [c7c455b68129:43255 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-12-07T13:24:49,815 INFO [RS:0;c7c455b68129:35435 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=c7c455b68129%2C35435%2C1733577889191, suffix=, logDir=hdfs://localhost:42747/user/jenkins/test-data/fef1c443-84de-e31e-1cc2-7d766696f91f/WALs/c7c455b68129,35435,1733577889191, archiveDir=hdfs://localhost:42747/user/jenkins/test-data/fef1c443-84de-e31e-1cc2-7d766696f91f/oldWALs, maxLogs=32 2024-12-07T13:24:49,820 INFO [RS:0;c7c455b68129:35435 {}] monitor.StreamSlowMonitor(122): New stream slow monitor c7c455b68129%2C35435%2C1733577889191.1733577889820 2024-12-07T13:24:49,827 INFO [RS:0;c7c455b68129:35435 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/fef1c443-84de-e31e-1cc2-7d766696f91f/WALs/c7c455b68129,35435,1733577889191/c7c455b68129%2C35435%2C1733577889191.1733577889820 2024-12-07T13:24:49,830 DEBUG [RS:0;c7c455b68129:35435 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37589:37589),(127.0.0.1/127.0.0.1:41069:41069)] 2024-12-07T13:24:50,032 DEBUG [c7c455b68129:43255 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-12-07T13:24:50,034 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=c7c455b68129,35435,1733577889191 2024-12-07T13:24:50,039 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as c7c455b68129,35435,1733577889191, state=OPENING 2024-12-07T13:24:50,049 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-07T13:24:50,059 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43255-0x10000735b880000, quorum=127.0.0.1:57200, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T13:24:50,059 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35435-0x10000735b880001, quorum=127.0.0.1:57200, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T13:24:50,061 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-07T13:24:50,061 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-07T13:24:50,061 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, 
server=c7c455b68129,35435,1733577889191}] 2024-12-07T13:24:50,061 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-07T13:24:50,175 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T13:24:50,183 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T13:24:50,216 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-07T13:24:50,219 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:56645, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-07T13:24:50,225 INFO [RS_OPEN_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-07T13:24:50,225 INFO [RS_OPEN_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-07T13:24:50,229 INFO [RS_OPEN_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=c7c455b68129%2C35435%2C1733577889191.meta, suffix=.meta, logDir=hdfs://localhost:42747/user/jenkins/test-data/fef1c443-84de-e31e-1cc2-7d766696f91f/WALs/c7c455b68129,35435,1733577889191, archiveDir=hdfs://localhost:42747/user/jenkins/test-data/fef1c443-84de-e31e-1cc2-7d766696f91f/oldWALs, maxLogs=32 2024-12-07T13:24:50,231 INFO [RS_OPEN_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor c7c455b68129%2C35435%2C1733577889191.meta.1733577890231.meta 2024-12-07T13:24:50,237 INFO [RS_OPEN_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/fef1c443-84de-e31e-1cc2-7d766696f91f/WALs/c7c455b68129,35435,1733577889191/c7c455b68129%2C35435%2C1733577889191.meta.1733577890231.meta 2024-12-07T13:24:50,238 DEBUG [RS_OPEN_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41069:41069),(127.0.0.1/127.0.0.1:37589:37589)] 2024-12-07T13:24:50,238 DEBUG [RS_OPEN_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-07T13:24:50,239 DEBUG [RS_OPEN_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-07T13:24:50,239 DEBUG [RS_OPEN_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-07T13:24:50,239 INFO [RS_OPEN_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor 
org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-12-07T13:24:50,239 DEBUG [RS_OPEN_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-07T13:24:50,239 DEBUG [RS_OPEN_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T13:24:50,239 DEBUG [RS_OPEN_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-07T13:24:50,239 DEBUG [RS_OPEN_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-07T13:24:50,241 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-07T13:24:50,242 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-07T13:24:50,242 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T13:24:50,243 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T13:24:50,243 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-07T13:24:50,244 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 
1588230740 columnFamilyName ns 2024-12-07T13:24:50,244 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T13:24:50,244 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T13:24:50,244 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-07T13:24:50,245 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-07T13:24:50,245 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T13:24:50,246 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T13:24:50,246 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-07T13:24:50,247 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-07T13:24:50,247 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T13:24:50,247 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, 
verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T13:24:50,247 DEBUG [RS_OPEN_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-07T13:24:50,248 DEBUG [RS_OPEN_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42747/user/jenkins/test-data/fef1c443-84de-e31e-1cc2-7d766696f91f/data/hbase/meta/1588230740 2024-12-07T13:24:50,249 DEBUG [RS_OPEN_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42747/user/jenkins/test-data/fef1c443-84de-e31e-1cc2-7d766696f91f/data/hbase/meta/1588230740 2024-12-07T13:24:50,251 DEBUG [RS_OPEN_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-07T13:24:50,251 DEBUG [RS_OPEN_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-07T13:24:50,251 DEBUG [RS_OPEN_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-07T13:24:50,253 DEBUG [RS_OPEN_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-07T13:24:50,254 INFO [RS_OPEN_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=872668, jitterRate=0.10965566337108612}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-07T13:24:50,254 DEBUG [RS_OPEN_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-07T13:24:50,254 DEBUG [RS_OPEN_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733577890239Writing region info on filesystem at 1733577890239Initializing all the Stores at 1733577890241 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733577890241Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733577890241Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', 
MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733577890241Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733577890241Cleaning up temporary data from old regions at 1733577890251 (+10 ms)Running coprocessor post-open hooks at 1733577890254 (+3 ms)Region opened successfully at 1733577890254 2024-12-07T13:24:50,256 INFO [RS_OPEN_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733577890216 2024-12-07T13:24:50,258 DEBUG [RS_OPEN_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-07T13:24:50,259 INFO [RS_OPEN_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-07T13:24:50,260 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=c7c455b68129,35435,1733577889191 2024-12-07T13:24:50,261 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as c7c455b68129,35435,1733577889191, state=OPEN 2024-12-07T13:24:50,329 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35435-0x10000735b880001, quorum=127.0.0.1:57200, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-07T13:24:50,329 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43255-0x10000735b880000, quorum=127.0.0.1:57200, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-07T13:24:50,329 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=c7c455b68129,35435,1733577889191 2024-12-07T13:24:50,329 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-07T13:24:50,329 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-07T13:24:50,335 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-07T13:24:50,336 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=c7c455b68129,35435,1733577889191 in 268 msec 2024-12-07T13:24:50,342 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-07T13:24:50,342 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 
in 708 msec 2024-12-07T13:24:50,343 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-07T13:24:50,343 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-07T13:24:50,345 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-07T13:24:50,345 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=c7c455b68129,35435,1733577889191, seqNum=-1] 2024-12-07T13:24:50,345 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T13:24:50,347 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:49745, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T13:24:50,354 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 777 msec 2024-12-07T13:24:50,355 INFO [master/c7c455b68129:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733577890354, completionTime=-1 2024-12-07T13:24:50,355 INFO [master/c7c455b68129:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-12-07T13:24:50,355 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-12-07T13:24:50,357 INFO [master/c7c455b68129:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-12-07T13:24:50,357 INFO [master/c7c455b68129:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733577950357 2024-12-07T13:24:50,357 INFO [master/c7c455b68129:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733578010357 2024-12-07T13:24:50,357 INFO [master/c7c455b68129:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec 2024-12-07T13:24:50,358 INFO [master/c7c455b68129:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c7c455b68129,43255,1733577888981-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-07T13:24:50,358 INFO [master/c7c455b68129:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c7c455b68129,43255,1733577888981-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-07T13:24:50,358 INFO [master/c7c455b68129:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c7c455b68129,43255,1733577888981-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-07T13:24:50,358 INFO [master/c7c455b68129:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-c7c455b68129:43255, period=300000, unit=MILLISECONDS is enabled. 
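The "Start fetching meta region location from registry" / "fetched meta region location" pair above is the same lookup any client performs before its first read. A hedged sketch of that lookup through the public client API (the quorum address is copied from the log; in HBase 3.0 the registry endpoint actually used depends on the configured connection registry, so this is only one way to point a client at the test cluster):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.util.Bytes;

public class MetaLocationSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // The test's quorum; a real client normally picks this up from hbase-site.xml.
        conf.set("hbase.zookeeper.quorum", "127.0.0.1:57200");

        try (Connection connection = ConnectionFactory.createConnection(conf);
             RegionLocator locator = connection.getRegionLocator(TableName.META_TABLE_NAME)) {
            // Asking for any row's location on hbase:meta exercises the same
            // "fetching meta region location from registry" path logged above.
            HRegionLocation location = locator.getRegionLocation(Bytes.toBytes(""));
            System.out.println(location);
        }
    }
}
```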
2024-12-07T13:24:50,358 INFO [master/c7c455b68129:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-07T13:24:50,358 INFO [master/c7c455b68129:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-12-07T13:24:50,360 DEBUG [master/c7c455b68129:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-07T13:24:50,364 INFO [master/c7c455b68129:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.105sec 2024-12-07T13:24:50,364 INFO [master/c7c455b68129:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-07T13:24:50,364 INFO [master/c7c455b68129:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-07T13:24:50,364 INFO [master/c7c455b68129:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-07T13:24:50,364 INFO [master/c7c455b68129:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-12-07T13:24:50,365 INFO [master/c7c455b68129:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-07T13:24:50,365 INFO [master/c7c455b68129:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c7c455b68129,43255,1733577888981-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-07T13:24:50,365 INFO [master/c7c455b68129:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c7c455b68129,43255,1733577888981-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-07T13:24:50,371 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-07T13:24:50,371 INFO [master/c7c455b68129:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-07T13:24:50,371 INFO [master/c7c455b68129:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c7c455b68129,43255,1733577888981-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
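
The "Chore ScheduledChore name=..., period=..., unit=... is enabled." entries above are emitted when the master registers its periodic background tasks (balancer, catalog janitor, MOB cleaners, HbckChore, and so on) with HBase's ChoreService. As a rough, hypothetical illustration of what such a registration looks like against the public ScheduledChore/ChoreService API, the sketch below uses an invented chore name, period, and Stoppable; none of it is taken from this log, and the exact constructor overloads available may differ by HBase version.

```java
import org.apache.hadoop.hbase.ChoreService;
import org.apache.hadoop.hbase.ScheduledChore;
import org.apache.hadoop.hbase.Stoppable;

// Minimal sketch: defining and scheduling a periodic task of the kind the
// master logs above as "Chore ScheduledChore name=..., ... is enabled."
// Names and the 1000 ms period are examples only.
public class ChoreSketch {
  // Trivial Stoppable for the example; real servers pass the HMaster/HRegionServer itself.
  static final class SimpleStopper implements Stoppable {
    private volatile boolean stopped;
    @Override public void stop(String why) { stopped = true; }
    @Override public boolean isStopped() { return stopped; }
  }

  public static void main(String[] args) throws InterruptedException {
    Stoppable stopper = new SimpleStopper();
    ScheduledChore chore = new ScheduledChore("ExampleChore", stopper, 1000) {
      @Override
      protected void chore() {
        // Work that should run once per period goes here.
        System.out.println("chore tick");
      }
    };
    ChoreService service = new ChoreService("example");
    service.scheduleChore(chore);  // registration point; HBase logs the enablement here
    Thread.sleep(3500);
    stopper.stop("done");
    service.shutdown();
  }
}
```
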
2024-12-07T13:24:50,417 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4b9379ae, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T13:24:50,418 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request c7c455b68129,43255,-1 for getting cluster id 2024-12-07T13:24:50,418 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-07T13:24:50,420 DEBUG [HMaster-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '73197e75-2c9d-43ee-b718-831f193c1546' 2024-12-07T13:24:50,421 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-07T13:24:50,421 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "73197e75-2c9d-43ee-b718-831f193c1546" 2024-12-07T13:24:50,422 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4ef451c2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T13:24:50,422 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [c7c455b68129,43255,-1] 2024-12-07T13:24:50,423 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-07T13:24:50,424 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T13:24:50,426 INFO [HMaster-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:44592, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-07T13:24:50,428 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3aadf114, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T13:24:50,429 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-07T13:24:50,431 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=c7c455b68129,35435,1733577889191, seqNum=-1] 2024-12-07T13:24:50,431 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T13:24:50,434 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:35542, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T13:24:50,436 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=c7c455b68129,43255,1733577888981 2024-12-07T13:24:50,437 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T13:24:50,441 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-12-07T13:24:50,441 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-12-07T13:24:50,441 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-12-07T13:24:50,441 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at org.apache.hadoop.hbase.regionserver.wal.TestLogRolling.testLogRollOnDatanodeDeath(TestLogRolling.java:201) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at 
org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-07T13:24:50,441 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T13:24:50,441 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T13:24:50,441 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-07T13:24:50,442 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-07T13:24:50,442 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=292436729, stopped=false 2024-12-07T13:24:50,442 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=c7c455b68129,43255,1733577888981 2024-12-07T13:24:50,465 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35435-0x10000735b880001, quorum=127.0.0.1:57200, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-07T13:24:50,465 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43255-0x10000735b880000, quorum=127.0.0.1:57200, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-07T13:24:50,465 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-07T13:24:50,465 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43255-0x10000735b880000, quorum=127.0.0.1:57200, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T13:24:50,465 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35435-0x10000735b880001, quorum=127.0.0.1:57200, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T13:24:50,466 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
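
The call stacks recorded above show the teardown path for this run: AbstractTestLogRolling.tearDown invokes HBaseTestingUtil.shutdownMiniCluster, which closes the async connection and shuts the single-process cluster down; later in this log the same utility brings up a fresh minicluster with StartMiniClusterOption{numMasters=1, numRegionServers=1, numDataNodes=2, ...}. For orientation only, here is a minimal, hypothetical JUnit 4 sketch of that start/shutdown lifecycle. The class name, test method, and the @BeforeClass/@AfterClass placement are assumptions for illustration and are not the actual AbstractTestLogRolling/TestLogRolling source.

```java
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.StartMiniClusterOption;
import org.junit.AfterClass;
import org.junit.Assert;
import org.junit.BeforeClass;
import org.junit.Test;

// Hypothetical sketch of the minicluster lifecycle visible in this log.
public class MiniClusterLifecycleSketch {
  private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();

  @BeforeClass
  public static void setUp() throws Exception {
    // Mirrors the option printed later in the log: 1 master, 1 region server, 2 data nodes.
    StartMiniClusterOption option = StartMiniClusterOption.builder()
        .numMasters(1)
        .numRegionServers(1)
        .numDataNodes(2)
        .build();
    TEST_UTIL.startMiniCluster(option);
  }

  @AfterClass
  public static void tearDown() throws Exception {
    // Corresponds to the "Shutting down minicluster" / "Minicluster is down"
    // entries in this log.
    TEST_UTIL.shutdownMiniCluster();
  }

  @Test
  public void clusterConfigurationIsAvailable() {
    // At this point the log reports "Minicluster is up; activeMaster=...".
    Assert.assertNotNull(TEST_UTIL.getConfiguration());
  }
}
```
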
2024-12-07T13:24:50,466 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at org.apache.hadoop.hbase.regionserver.wal.TestLogRolling.testLogRollOnDatanodeDeath(TestLogRolling.java:201) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at 
java.base/java.lang.Thread.run(Thread.java:840) 2024-12-07T13:24:50,466 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T13:24:50,466 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:35435-0x10000735b880001, quorum=127.0.0.1:57200, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-07T13:24:50,466 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:43255-0x10000735b880000, quorum=127.0.0.1:57200, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-07T13:24:50,466 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'c7c455b68129,35435,1733577889191' ***** 2024-12-07T13:24:50,466 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-07T13:24:50,466 INFO [RS:0;c7c455b68129:35435 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-07T13:24:50,466 INFO [RS:0;c7c455b68129:35435 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-07T13:24:50,466 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-07T13:24:50,467 INFO [RS:0;c7c455b68129:35435 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-07T13:24:50,467 INFO [RS:0;c7c455b68129:35435 {}] regionserver.HRegionServer(959): stopping server c7c455b68129,35435,1733577889191 2024-12-07T13:24:50,467 INFO [RS:0;c7c455b68129:35435 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-07T13:24:50,467 INFO [RS:0;c7c455b68129:35435 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;c7c455b68129:35435. 2024-12-07T13:24:50,467 DEBUG [RS:0;c7c455b68129:35435 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-07T13:24:50,467 DEBUG [RS:0;c7c455b68129:35435 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T13:24:50,467 INFO [RS:0;c7c455b68129:35435 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 
2024-12-07T13:24:50,467 INFO [RS:0;c7c455b68129:35435 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-07T13:24:50,467 INFO [RS:0;c7c455b68129:35435 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-07T13:24:50,467 INFO [RS:0;c7c455b68129:35435 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-07T13:24:50,467 INFO [RS:0;c7c455b68129:35435 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-12-07T13:24:50,468 DEBUG [RS:0;c7c455b68129:35435 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-12-07T13:24:50,468 DEBUG [RS:0;c7c455b68129:35435 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-12-07T13:24:50,468 DEBUG [RS_CLOSE_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-07T13:24:50,468 INFO [RS_CLOSE_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-07T13:24:50,468 DEBUG [RS_CLOSE_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-07T13:24:50,468 DEBUG [RS_CLOSE_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-07T13:24:50,468 DEBUG [RS_CLOSE_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-07T13:24:50,468 INFO [RS_CLOSE_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=74 B heapSize=1.22 KB 2024-12-07T13:24:50,483 DEBUG [RS_CLOSE_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42747/user/jenkins/test-data/fef1c443-84de-e31e-1cc2-7d766696f91f/data/hbase/meta/1588230740/.tmp/ns/181b97867d3d45efb8336d325487db4a is 43, key is default/ns:d/1733577890347/Put/seqid=0 2024-12-07T13:24:50,489 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38195 is added to blk_1073741835_1011 (size=5153) 2024-12-07T13:24:50,489 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41385 is added to blk_1073741835_1011 (size=5153) 2024-12-07T13:24:50,490 INFO [RS_CLOSE_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:42747/user/jenkins/test-data/fef1c443-84de-e31e-1cc2-7d766696f91f/data/hbase/meta/1588230740/.tmp/ns/181b97867d3d45efb8336d325487db4a 2024-12-07T13:24:50,498 DEBUG [RS_CLOSE_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42747/user/jenkins/test-data/fef1c443-84de-e31e-1cc2-7d766696f91f/data/hbase/meta/1588230740/.tmp/ns/181b97867d3d45efb8336d325487db4a as hdfs://localhost:42747/user/jenkins/test-data/fef1c443-84de-e31e-1cc2-7d766696f91f/data/hbase/meta/1588230740/ns/181b97867d3d45efb8336d325487db4a 2024-12-07T13:24:50,505 INFO [RS_CLOSE_META-regionserver/c7c455b68129:0-0 
{event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42747/user/jenkins/test-data/fef1c443-84de-e31e-1cc2-7d766696f91f/data/hbase/meta/1588230740/ns/181b97867d3d45efb8336d325487db4a, entries=2, sequenceid=6, filesize=5.0 K 2024-12-07T13:24:50,506 INFO [RS_CLOSE_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 38ms, sequenceid=6, compaction requested=false 2024-12-07T13:24:50,506 DEBUG [RS_CLOSE_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_CLOSE_META}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-12-07T13:24:50,512 DEBUG [RS_CLOSE_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42747/user/jenkins/test-data/fef1c443-84de-e31e-1cc2-7d766696f91f/data/hbase/meta/1588230740/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-07T13:24:50,513 DEBUG [RS_CLOSE_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-07T13:24:50,513 INFO [RS_CLOSE_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-07T13:24:50,513 DEBUG [RS_CLOSE_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733577890468Running coprocessor pre-close hooks at 1733577890468Disabling compacts and flushes for region at 1733577890468Disabling writes for close at 1733577890468Obtaining lock to block concurrent updates at 1733577890468Preparing flush snapshotting stores in 1588230740 at 1733577890468Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=74, getHeapSize=1184, getOffHeapSize=0, getCellsCount=2 at 1733577890468Flushing stores of hbase:meta,,1.1588230740 at 1733577890469 (+1 ms)Flushing 1588230740/ns: creating writer at 1733577890469Flushing 1588230740/ns: appending metadata at 1733577890482 (+13 ms)Flushing 1588230740/ns: closing flushed file at 1733577890483 (+1 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6211242d: reopening flushed file at 1733577890497 (+14 ms)Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 38ms, sequenceid=6, compaction requested=false at 1733577890506 (+9 ms)Writing region close event to WAL at 1733577890508 (+2 ms)Running coprocessor post-close hooks at 1733577890513 (+5 ms)Closed at 1733577890513 2024-12-07T13:24:50,513 DEBUG [RS_CLOSE_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-07T13:24:50,668 INFO [RS:0;c7c455b68129:35435 {}] regionserver.HRegionServer(976): stopping server c7c455b68129,35435,1733577889191; all regions closed. 
2024-12-07T13:24:50,669 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:24:50,669 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:24:50,670 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:24:50,670 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:24:50,671 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:24:50,676 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41385 is added to blk_1073741834_1010 (size=1152) 2024-12-07T13:24:50,676 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38195 is added to blk_1073741834_1010 (size=1152) 2024-12-07T13:24:50,681 DEBUG [RS:0;c7c455b68129:35435 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/fef1c443-84de-e31e-1cc2-7d766696f91f/oldWALs 2024-12-07T13:24:50,681 INFO [RS:0;c7c455b68129:35435 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog c7c455b68129%2C35435%2C1733577889191.meta:.meta(num 1733577890231) 2024-12-07T13:24:50,681 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:24:50,682 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:24:50,682 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:24:50,682 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:24:50,682 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:24:50,684 INFO [regionserver/c7c455b68129:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-12-07T13:24:50,684 INFO [regionserver/c7c455b68129:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-12-07T13:24:50,685 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38195 is added to blk_1073741833_1009 (size=93) 2024-12-07T13:24:50,686 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41385 is added to blk_1073741833_1009 (size=93) 2024-12-07T13:24:50,689 DEBUG [RS:0;c7c455b68129:35435 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/fef1c443-84de-e31e-1cc2-7d766696f91f/oldWALs 2024-12-07T13:24:50,689 INFO [RS:0;c7c455b68129:35435 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog c7c455b68129%2C35435%2C1733577889191:(num 1733577889820) 2024-12-07T13:24:50,689 DEBUG [RS:0;c7c455b68129:35435 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T13:24:50,689 INFO [RS:0;c7c455b68129:35435 {}] regionserver.LeaseManager(133): Closed leases 2024-12-07T13:24:50,689 INFO [RS:0;c7c455b68129:35435 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-07T13:24:50,690 INFO [RS:0;c7c455b68129:35435 {}] hbase.ChoreService(370): Chore service for: regionserver/c7c455b68129:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-12-07T13:24:50,690 INFO [RS:0;c7c455b68129:35435 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-07T13:24:50,690 INFO [regionserver/c7c455b68129:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-07T13:24:50,690 INFO [RS:0;c7c455b68129:35435 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:35435 2024-12-07T13:24:50,694 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-07T13:24:50,701 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43255-0x10000735b880000, quorum=127.0.0.1:57200, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-07T13:24:50,701 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35435-0x10000735b880001, quorum=127.0.0.1:57200, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/c7c455b68129,35435,1733577889191 2024-12-07T13:24:50,702 INFO [RS:0;c7c455b68129:35435 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-07T13:24:50,702 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T13:24:50,702 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [c7c455b68129,35435,1733577889191] 2024-12-07T13:24:50,721 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T13:24:50,781 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/c7c455b68129,35435,1733577889191 already deleted, retry=false 2024-12-07T13:24:50,781 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; c7c455b68129,35435,1733577889191 expired; onlineServers=0 2024-12-07T13:24:50,781 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'c7c455b68129,43255,1733577888981' ***** 2024-12-07T13:24:50,781 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-07T13:24:50,781 INFO [M:0;c7c455b68129:43255 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-07T13:24:50,781 INFO [M:0;c7c455b68129:43255 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-07T13:24:50,781 DEBUG [M:0;c7c455b68129:43255 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-07T13:24:50,781 DEBUG [M:0;c7c455b68129:43255 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-07T13:24:50,781 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-12-07T13:24:50,781 DEBUG [master/c7c455b68129:0:becomeActiveMaster-HFileCleaner.large.0-1733577889585 {}] cleaner.HFileCleaner(306): Exit Thread[master/c7c455b68129:0:becomeActiveMaster-HFileCleaner.large.0-1733577889585,5,FailOnTimeoutGroup] 2024-12-07T13:24:50,781 DEBUG [master/c7c455b68129:0:becomeActiveMaster-HFileCleaner.small.0-1733577889585 {}] cleaner.HFileCleaner(306): Exit Thread[master/c7c455b68129:0:becomeActiveMaster-HFileCleaner.small.0-1733577889585,5,FailOnTimeoutGroup] 2024-12-07T13:24:50,782 INFO [M:0;c7c455b68129:43255 {}] hbase.ChoreService(370): Chore service for: master/c7c455b68129:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-12-07T13:24:50,782 INFO [M:0;c7c455b68129:43255 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-07T13:24:50,782 DEBUG [M:0;c7c455b68129:43255 {}] master.HMaster(1795): Stopping service threads 2024-12-07T13:24:50,782 INFO [M:0;c7c455b68129:43255 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-07T13:24:50,782 INFO [M:0;c7c455b68129:43255 {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-07T13:24:50,782 INFO [M:0;c7c455b68129:43255 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-07T13:24:50,782 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-07T13:24:50,791 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43255-0x10000735b880000, quorum=127.0.0.1:57200, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-07T13:24:50,791 DEBUG [M:0;c7c455b68129:43255 {}] zookeeper.ZKUtil(347): master:43255-0x10000735b880000, quorum=127.0.0.1:57200, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-07T13:24:50,792 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43255-0x10000735b880000, quorum=127.0.0.1:57200, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T13:24:50,792 WARN [M:0;c7c455b68129:43255 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-07T13:24:50,792 INFO [M:0;c7c455b68129:43255 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:42747/user/jenkins/test-data/fef1c443-84de-e31e-1cc2-7d766696f91f/.lastflushedseqids 2024-12-07T13:24:50,798 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41385 is added to blk_1073741836_1012 (size=99) 2024-12-07T13:24:50,798 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38195 is added to blk_1073741836_1012 (size=99) 2024-12-07T13:24:50,798 INFO [M:0;c7c455b68129:43255 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-12-07T13:24:50,798 INFO [M:0;c7c455b68129:43255 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-07T13:24:50,799 DEBUG [M:0;c7c455b68129:43255 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-07T13:24:50,799 INFO [M:0;c7c455b68129:43255 {}] 
regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-07T13:24:50,799 DEBUG [M:0;c7c455b68129:43255 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-07T13:24:50,799 DEBUG [M:0;c7c455b68129:43255 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-07T13:24:50,799 DEBUG [M:0;c7c455b68129:43255 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-07T13:24:50,799 INFO [M:0;c7c455b68129:43255 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=7.67 KB heapSize=11.34 KB 2024-12-07T13:24:50,815 DEBUG [M:0;c7c455b68129:43255 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42747/user/jenkins/test-data/fef1c443-84de-e31e-1cc2-7d766696f91f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/05baef608ab94e07a2a16b4004258ffa is 82, key is hbase:meta,,1/info:regioninfo/1733577890259/Put/seqid=0 2024-12-07T13:24:50,820 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41385 is added to blk_1073741837_1013 (size=5672) 2024-12-07T13:24:50,820 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38195 is added to blk_1073741837_1013 (size=5672) 2024-12-07T13:24:50,820 INFO [M:0;c7c455b68129:43255 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:42747/user/jenkins/test-data/fef1c443-84de-e31e-1cc2-7d766696f91f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/05baef608ab94e07a2a16b4004258ffa 2024-12-07T13:24:50,842 DEBUG [M:0;c7c455b68129:43255 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42747/user/jenkins/test-data/fef1c443-84de-e31e-1cc2-7d766696f91f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/a476e2dd97934a2a892736b15a6a88d7 is 240, key is \x00\x00\x00\x00\x00\x00\x00\x01/proc:d/1733577890353/Put/seqid=0 2024-12-07T13:24:50,847 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38195 is added to blk_1073741838_1014 (size=5275) 2024-12-07T13:24:50,848 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41385 is added to blk_1073741838_1014 (size=5275) 2024-12-07T13:24:50,848 INFO [M:0;c7c455b68129:43255 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.06 KB at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:42747/user/jenkins/test-data/fef1c443-84de-e31e-1cc2-7d766696f91f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/a476e2dd97934a2a892736b15a6a88d7 2024-12-07T13:24:50,868 DEBUG [M:0;c7c455b68129:43255 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42747/user/jenkins/test-data/fef1c443-84de-e31e-1cc2-7d766696f91f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/e8d6f9f3ba9a4c73a469aaf45534c3a8 is 69, key is c7c455b68129,35435,1733577889191/rs:state/1733577889656/Put/seqid=0 2024-12-07T13:24:50,871 INFO [RS:0;c7c455b68129:35435 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-07T13:24:50,871 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
regionserver:35435-0x10000735b880001, quorum=127.0.0.1:57200, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-07T13:24:50,871 INFO [RS:0;c7c455b68129:35435 {}] regionserver.HRegionServer(1031): Exiting; stopping=c7c455b68129,35435,1733577889191; zookeeper connection closed. 2024-12-07T13:24:50,871 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35435-0x10000735b880001, quorum=127.0.0.1:57200, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-07T13:24:50,871 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@1ca141 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@1ca141 2024-12-07T13:24:50,871 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-12-07T13:24:50,873 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38195 is added to blk_1073741839_1015 (size=5156) 2024-12-07T13:24:50,874 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41385 is added to blk_1073741839_1015 (size=5156) 2024-12-07T13:24:50,874 INFO [M:0;c7c455b68129:43255 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:42747/user/jenkins/test-data/fef1c443-84de-e31e-1cc2-7d766696f91f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/e8d6f9f3ba9a4c73a469aaf45534c3a8 2024-12-07T13:24:50,893 DEBUG [M:0;c7c455b68129:43255 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42747/user/jenkins/test-data/fef1c443-84de-e31e-1cc2-7d766696f91f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/a88ff5686f584f64a1193ff6a0ef63f6 is 52, key is load_balancer_on/state:d/1733577890439/Put/seqid=0 2024-12-07T13:24:50,898 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38195 is added to blk_1073741840_1016 (size=5056) 2024-12-07T13:24:50,899 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41385 is added to blk_1073741840_1016 (size=5056) 2024-12-07T13:24:50,899 INFO [M:0;c7c455b68129:43255 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:42747/user/jenkins/test-data/fef1c443-84de-e31e-1cc2-7d766696f91f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/a88ff5686f584f64a1193ff6a0ef63f6 2024-12-07T13:24:50,906 DEBUG [M:0;c7c455b68129:43255 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42747/user/jenkins/test-data/fef1c443-84de-e31e-1cc2-7d766696f91f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/05baef608ab94e07a2a16b4004258ffa as hdfs://localhost:42747/user/jenkins/test-data/fef1c443-84de-e31e-1cc2-7d766696f91f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/05baef608ab94e07a2a16b4004258ffa 2024-12-07T13:24:50,913 INFO [M:0;c7c455b68129:43255 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42747/user/jenkins/test-data/fef1c443-84de-e31e-1cc2-7d766696f91f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/05baef608ab94e07a2a16b4004258ffa, entries=8, sequenceid=29, filesize=5.5 K 2024-12-07T13:24:50,914 DEBUG [M:0;c7c455b68129:43255 {}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42747/user/jenkins/test-data/fef1c443-84de-e31e-1cc2-7d766696f91f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/a476e2dd97934a2a892736b15a6a88d7 as hdfs://localhost:42747/user/jenkins/test-data/fef1c443-84de-e31e-1cc2-7d766696f91f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/a476e2dd97934a2a892736b15a6a88d7 2024-12-07T13:24:50,921 INFO [M:0;c7c455b68129:43255 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42747/user/jenkins/test-data/fef1c443-84de-e31e-1cc2-7d766696f91f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/a476e2dd97934a2a892736b15a6a88d7, entries=3, sequenceid=29, filesize=5.2 K 2024-12-07T13:24:50,922 DEBUG [M:0;c7c455b68129:43255 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42747/user/jenkins/test-data/fef1c443-84de-e31e-1cc2-7d766696f91f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/e8d6f9f3ba9a4c73a469aaf45534c3a8 as hdfs://localhost:42747/user/jenkins/test-data/fef1c443-84de-e31e-1cc2-7d766696f91f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/e8d6f9f3ba9a4c73a469aaf45534c3a8 2024-12-07T13:24:50,928 INFO [M:0;c7c455b68129:43255 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42747/user/jenkins/test-data/fef1c443-84de-e31e-1cc2-7d766696f91f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/e8d6f9f3ba9a4c73a469aaf45534c3a8, entries=1, sequenceid=29, filesize=5.0 K 2024-12-07T13:24:50,929 DEBUG [M:0;c7c455b68129:43255 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42747/user/jenkins/test-data/fef1c443-84de-e31e-1cc2-7d766696f91f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/a88ff5686f584f64a1193ff6a0ef63f6 as hdfs://localhost:42747/user/jenkins/test-data/fef1c443-84de-e31e-1cc2-7d766696f91f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/a88ff5686f584f64a1193ff6a0ef63f6 2024-12-07T13:24:50,935 INFO [M:0;c7c455b68129:43255 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42747/user/jenkins/test-data/fef1c443-84de-e31e-1cc2-7d766696f91f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/a88ff5686f584f64a1193ff6a0ef63f6, entries=1, sequenceid=29, filesize=4.9 K 2024-12-07T13:24:50,936 INFO [M:0;c7c455b68129:43255 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 137ms, sequenceid=29, compaction requested=false 2024-12-07T13:24:50,938 INFO [M:0;c7c455b68129:43255 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-07T13:24:50,938 DEBUG [M:0;c7c455b68129:43255 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733577890798Disabling compacts and flushes for region at 1733577890798Disabling writes for close at 1733577890799 (+1 ms)Obtaining lock to block concurrent updates at 1733577890799Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733577890799Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=7850, getHeapSize=11544, getOffHeapSize=0, getCellsCount=36 at 1733577890799Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1733577890800 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733577890800Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733577890814 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733577890814Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733577890827 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733577890842 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733577890842Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733577890854 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733577890868 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733577890868Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1733577890880 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1733577890893 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1733577890893Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@8d52f19: reopening flushed file at 1733577890905 (+12 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7c1746e2: reopening flushed file at 1733577890913 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5240d312: reopening flushed file at 1733577890921 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5b5e4956: reopening flushed file at 1733577890928 (+7 ms)Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 137ms, sequenceid=29, compaction requested=false at 1733577890936 (+8 ms)Writing region close event to WAL at 1733577890938 (+2 ms)Closed at 1733577890938 2024-12-07T13:24:50,938 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:24:50,938 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:24:50,938 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:24:50,938 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:24:50,939 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:24:50,940 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38195 is added to blk_1073741830_1006 (size=10311) 2024-12-07T13:24:50,940 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41385 is added to blk_1073741830_1006 (size=10311) 2024-12-07T13:24:50,941 INFO [M:0;c7c455b68129:43255 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-12-07T13:24:50,941 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-07T13:24:50,941 INFO [M:0;c7c455b68129:43255 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:43255 2024-12-07T13:24:50,941 INFO [M:0;c7c455b68129:43255 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-07T13:24:51,055 INFO [M:0;c7c455b68129:43255 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-07T13:24:51,055 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43255-0x10000735b880000, quorum=127.0.0.1:57200, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-07T13:24:51,055 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43255-0x10000735b880000, quorum=127.0.0.1:57200, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-07T13:24:51,058 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@da5059a{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-07T13:24:51,058 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2220be00{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-07T13:24:51,058 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-07T13:24:51,058 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6a742c1f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-07T13:24:51,059 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6082dc4f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f061fd5c-570e-ad23-4fbc-dc4aad96aa74/hadoop.log.dir/,STOPPED} 2024-12-07T13:24:51,060 WARN [BP-974771455-172.17.0.3-1733577886218 heartbeating to localhost/127.0.0.1:42747 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-07T13:24:51,060 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-07T13:24:51,060 WARN [BP-974771455-172.17.0.3-1733577886218 heartbeating to localhost/127.0.0.1:42747 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-974771455-172.17.0.3-1733577886218 (Datanode Uuid acd5bcf6-67bd-4109-a55d-168ee323fbaa) service to localhost/127.0.0.1:42747 2024-12-07T13:24:51,060 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-07T13:24:51,061 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f061fd5c-570e-ad23-4fbc-dc4aad96aa74/cluster_2a49fc76-c495-b51f-762a-43cb5e526e92/data/data3/current/BP-974771455-172.17.0.3-1733577886218 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-07T13:24:51,061 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f061fd5c-570e-ad23-4fbc-dc4aad96aa74/cluster_2a49fc76-c495-b51f-762a-43cb5e526e92/data/data4/current/BP-974771455-172.17.0.3-1733577886218 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-07T13:24:51,061 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-07T13:24:51,064 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4595827f{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-07T13:24:51,064 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3b01355c{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-07T13:24:51,064 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-07T13:24:51,064 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7b58749b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-07T13:24:51,065 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@61783b0f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f061fd5c-570e-ad23-4fbc-dc4aad96aa74/hadoop.log.dir/,STOPPED} 2024-12-07T13:24:51,066 WARN [BP-974771455-172.17.0.3-1733577886218 heartbeating to localhost/127.0.0.1:42747 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-07T13:24:51,066 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-07T13:24:51,066 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-07T13:24:51,066 WARN [BP-974771455-172.17.0.3-1733577886218 heartbeating to localhost/127.0.0.1:42747 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-974771455-172.17.0.3-1733577886218 (Datanode Uuid 334e508b-26a7-407b-a216-ed4b22647e15) service to localhost/127.0.0.1:42747 2024-12-07T13:24:51,066 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f061fd5c-570e-ad23-4fbc-dc4aad96aa74/cluster_2a49fc76-c495-b51f-762a-43cb5e526e92/data/data1/current/BP-974771455-172.17.0.3-1733577886218 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-07T13:24:51,067 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f061fd5c-570e-ad23-4fbc-dc4aad96aa74/cluster_2a49fc76-c495-b51f-762a-43cb5e526e92/data/data2/current/BP-974771455-172.17.0.3-1733577886218 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-07T13:24:51,067 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-07T13:24:51,072 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@55cb1221{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-07T13:24:51,072 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@542ee468{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-07T13:24:51,072 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-07T13:24:51,073 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@737d6115{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-07T13:24:51,073 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@54b8bf96{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f061fd5c-570e-ad23-4fbc-dc4aad96aa74/hadoop.log.dir/,STOPPED} 2024-12-07T13:24:51,078 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-12-07T13:24:51,095 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-12-07T13:24:51,095 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-07T13:24:51,095 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f061fd5c-570e-ad23-4fbc-dc4aad96aa74/hadoop.log.dir so I do NOT create it in target/test-data/3a4fca0d-a22f-07a3-d286-965b16cc613d 2024-12-07T13:24:51,095 INFO [Time-limited 
test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f061fd5c-570e-ad23-4fbc-dc4aad96aa74/hadoop.tmp.dir so I do NOT create it in target/test-data/3a4fca0d-a22f-07a3-d286-965b16cc613d 2024-12-07T13:24:51,095 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3a4fca0d-a22f-07a3-d286-965b16cc613d/cluster_3b40d539-567c-d4e1-b985-613e875fc041, deleteOnExit=true 2024-12-07T13:24:51,095 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-12-07T13:24:51,096 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3a4fca0d-a22f-07a3-d286-965b16cc613d/test.cache.data in system properties and HBase conf 2024-12-07T13:24:51,096 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3a4fca0d-a22f-07a3-d286-965b16cc613d/hadoop.tmp.dir in system properties and HBase conf 2024-12-07T13:24:51,096 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3a4fca0d-a22f-07a3-d286-965b16cc613d/hadoop.log.dir in system properties and HBase conf 2024-12-07T13:24:51,096 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3a4fca0d-a22f-07a3-d286-965b16cc613d/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-07T13:24:51,096 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3a4fca0d-a22f-07a3-d286-965b16cc613d/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-07T13:24:51,096 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-12-07T13:24:51,096 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-12-07T13:24:51,096 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3a4fca0d-a22f-07a3-d286-965b16cc613d/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-07T13:24:51,096 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3a4fca0d-a22f-07a3-d286-965b16cc613d/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-07T13:24:51,096 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3a4fca0d-a22f-07a3-d286-965b16cc613d/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-07T13:24:51,096 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3a4fca0d-a22f-07a3-d286-965b16cc613d/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-07T13:24:51,097 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3a4fca0d-a22f-07a3-d286-965b16cc613d/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-07T13:24:51,097 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3a4fca0d-a22f-07a3-d286-965b16cc613d/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-07T13:24:51,097 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3a4fca0d-a22f-07a3-d286-965b16cc613d/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-07T13:24:51,097 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3a4fca0d-a22f-07a3-d286-965b16cc613d/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-07T13:24:51,097 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3a4fca0d-a22f-07a3-d286-965b16cc613d/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-07T13:24:51,097 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3a4fca0d-a22f-07a3-d286-965b16cc613d/nfs.dump.dir in system properties and HBase conf 2024-12-07T13:24:51,097 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3a4fca0d-a22f-07a3-d286-965b16cc613d/java.io.tmpdir in system properties and HBase conf 2024-12-07T13:24:51,097 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3a4fca0d-a22f-07a3-d286-965b16cc613d/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-07T13:24:51,097 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3a4fca0d-a22f-07a3-d286-965b16cc613d/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-07T13:24:51,097 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3a4fca0d-a22f-07a3-d286-965b16cc613d/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-07T13:24:51,108 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-07T13:24:51,415 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-07T13:24:51,419 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-07T13:24:51,422 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-07T13:24:51,422 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-07T13:24:51,422 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-07T13:24:51,423 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-07T13:24:51,423 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@692b8c40{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3a4fca0d-a22f-07a3-d286-965b16cc613d/hadoop.log.dir/,AVAILABLE} 2024-12-07T13:24:51,423 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@ab5393f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-07T13:24:51,512 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3c461833{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3a4fca0d-a22f-07a3-d286-965b16cc613d/java.io.tmpdir/jetty-localhost-36553-hadoop-hdfs-3_4_1-tests_jar-_-any-8868673062497822330/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-07T13:24:51,512 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@33e53d1d{HTTP/1.1, (http/1.1)}{localhost:36553} 2024-12-07T13:24:51,512 INFO [Time-limited test {}] server.Server(415): Started @109352ms 2024-12-07T13:24:51,523 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-07T13:24:51,682 INFO [regionserver/c7c455b68129:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-07T13:24:51,802 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-07T13:24:51,806 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-07T13:24:51,806 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-07T13:24:51,807 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-07T13:24:51,807 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-07T13:24:51,807 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@51561b8f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3a4fca0d-a22f-07a3-d286-965b16cc613d/hadoop.log.dir/,AVAILABLE} 2024-12-07T13:24:51,808 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@372d60ec{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-07T13:24:51,899 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@158b5ab3{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3a4fca0d-a22f-07a3-d286-965b16cc613d/java.io.tmpdir/jetty-localhost-42623-hadoop-hdfs-3_4_1-tests_jar-_-any-16697091240665053376/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-07T13:24:51,899 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1495e1af{HTTP/1.1, (http/1.1)}{localhost:42623} 2024-12-07T13:24:51,899 INFO [Time-limited test {}] server.Server(415): Started @109740ms 2024-12-07T13:24:51,901 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-07T13:24:51,925 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-07T13:24:51,928 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-07T13:24:51,929 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-07T13:24:51,929 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-07T13:24:51,929 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-07T13:24:51,929 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5289966b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3a4fca0d-a22f-07a3-d286-965b16cc613d/hadoop.log.dir/,AVAILABLE} 2024-12-07T13:24:51,929 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@217a95d6{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-07T13:24:52,020 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@10c7ae56{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3a4fca0d-a22f-07a3-d286-965b16cc613d/java.io.tmpdir/jetty-localhost-43247-hadoop-hdfs-3_4_1-tests_jar-_-any-9591411168465008885/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-07T13:24:52,020 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@7d85f4da{HTTP/1.1, (http/1.1)}{localhost:43247} 2024-12-07T13:24:52,020 INFO [Time-limited test {}] server.Server(415): Started @109861ms 2024-12-07T13:24:52,021 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-07T13:24:53,058 WARN [Thread-671 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3a4fca0d-a22f-07a3-d286-965b16cc613d/cluster_3b40d539-567c-d4e1-b985-613e875fc041/data/data1/current/BP-959501690-172.17.0.3-1733577891118/current, will proceed with Du for space computation calculation, 2024-12-07T13:24:53,058 WARN [Thread-672 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3a4fca0d-a22f-07a3-d286-965b16cc613d/cluster_3b40d539-567c-d4e1-b985-613e875fc041/data/data2/current/BP-959501690-172.17.0.3-1733577891118/current, will proceed with Du for space computation calculation, 2024-12-07T13:24:53,075 WARN [Thread-635 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-07T13:24:53,077 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x164842456d7d82d0 with lease ID 0xea89a762f7bb5345: Processing first storage report for DS-5b80aff9-5093-47e6-b68d-7fda9ccb5d7b from datanode DatanodeRegistration(127.0.0.1:41375, datanodeUuid=7b803b60-17a9-4d27-bf78-3255a6a5ca0c, infoPort=44111, infoSecurePort=0, ipcPort=33741, storageInfo=lv=-57;cid=testClusterID;nsid=2132482547;c=1733577891118) 2024-12-07T13:24:53,077 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x164842456d7d82d0 with lease ID 0xea89a762f7bb5345: from storage DS-5b80aff9-5093-47e6-b68d-7fda9ccb5d7b node DatanodeRegistration(127.0.0.1:41375, datanodeUuid=7b803b60-17a9-4d27-bf78-3255a6a5ca0c, infoPort=44111, infoSecurePort=0, ipcPort=33741, storageInfo=lv=-57;cid=testClusterID;nsid=2132482547;c=1733577891118), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-07T13:24:53,077 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x164842456d7d82d0 with lease ID 0xea89a762f7bb5345: Processing first storage report for DS-e05ba83b-66d4-4a57-af83-e8a150c72900 from datanode DatanodeRegistration(127.0.0.1:41375, datanodeUuid=7b803b60-17a9-4d27-bf78-3255a6a5ca0c, infoPort=44111, infoSecurePort=0, ipcPort=33741, storageInfo=lv=-57;cid=testClusterID;nsid=2132482547;c=1733577891118) 2024-12-07T13:24:53,078 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x164842456d7d82d0 with lease ID 0xea89a762f7bb5345: from storage DS-e05ba83b-66d4-4a57-af83-e8a150c72900 node DatanodeRegistration(127.0.0.1:41375, datanodeUuid=7b803b60-17a9-4d27-bf78-3255a6a5ca0c, infoPort=44111, infoSecurePort=0, ipcPort=33741, storageInfo=lv=-57;cid=testClusterID;nsid=2132482547;c=1733577891118), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-07T13:24:53,178 WARN [Thread-683 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3a4fca0d-a22f-07a3-d286-965b16cc613d/cluster_3b40d539-567c-d4e1-b985-613e875fc041/data/data4/current/BP-959501690-172.17.0.3-1733577891118/current, will proceed with Du for space computation calculation, 2024-12-07T13:24:53,178 WARN [Thread-682 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3a4fca0d-a22f-07a3-d286-965b16cc613d/cluster_3b40d539-567c-d4e1-b985-613e875fc041/data/data3/current/BP-959501690-172.17.0.3-1733577891118/current, will proceed with Du for space computation calculation, 2024-12-07T13:24:53,198 WARN [Thread-658 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-07T13:24:53,200 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x3fbaa0ecdce1aeaf with lease ID 0xea89a762f7bb5346: Processing first storage report for DS-bc541ee7-02ba-4fbd-8aa8-6b25f00a10bd from datanode DatanodeRegistration(127.0.0.1:40395, datanodeUuid=bafc61d8-ce26-4093-91ed-a19abb4f3d38, infoPort=33821, infoSecurePort=0, ipcPort=36191, storageInfo=lv=-57;cid=testClusterID;nsid=2132482547;c=1733577891118) 2024-12-07T13:24:53,201 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x3fbaa0ecdce1aeaf with lease ID 0xea89a762f7bb5346: from storage DS-bc541ee7-02ba-4fbd-8aa8-6b25f00a10bd node DatanodeRegistration(127.0.0.1:40395, datanodeUuid=bafc61d8-ce26-4093-91ed-a19abb4f3d38, infoPort=33821, infoSecurePort=0, ipcPort=36191, storageInfo=lv=-57;cid=testClusterID;nsid=2132482547;c=1733577891118), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-07T13:24:53,201 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x3fbaa0ecdce1aeaf with lease ID 0xea89a762f7bb5346: Processing first storage report for DS-9e930cb4-428e-4e08-9621-be88807a9acf from datanode DatanodeRegistration(127.0.0.1:40395, datanodeUuid=bafc61d8-ce26-4093-91ed-a19abb4f3d38, infoPort=33821, infoSecurePort=0, ipcPort=36191, storageInfo=lv=-57;cid=testClusterID;nsid=2132482547;c=1733577891118) 2024-12-07T13:24:53,201 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x3fbaa0ecdce1aeaf with lease ID 0xea89a762f7bb5346: from storage DS-9e930cb4-428e-4e08-9621-be88807a9acf node DatanodeRegistration(127.0.0.1:40395, datanodeUuid=bafc61d8-ce26-4093-91ed-a19abb4f3d38, infoPort=33821, infoSecurePort=0, ipcPort=36191, storageInfo=lv=-57;cid=testClusterID;nsid=2132482547;c=1733577891118), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-07T13:24:53,262 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3a4fca0d-a22f-07a3-d286-965b16cc613d 2024-12-07T13:24:53,265 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3a4fca0d-a22f-07a3-d286-965b16cc613d/cluster_3b40d539-567c-d4e1-b985-613e875fc041/zookeeper_0, clientPort=55405, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3a4fca0d-a22f-07a3-d286-965b16cc613d/cluster_3b40d539-567c-d4e1-b985-613e875fc041/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3a4fca0d-a22f-07a3-d286-965b16cc613d/cluster_3b40d539-567c-d4e1-b985-613e875fc041/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-07T13:24:53,266 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=55405 2024-12-07T13:24:53,266 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T13:24:53,269 
INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T13:24:53,282 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41375 is added to blk_1073741825_1001 (size=7) 2024-12-07T13:24:53,282 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40395 is added to blk_1073741825_1001 (size=7) 2024-12-07T13:24:53,284 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084 with version=8 2024-12-07T13:24:53,284 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:33723/user/jenkins/test-data/3eb2eb4e-7df3-dd45-b018-332cf885bdba/hbase-staging 2024-12-07T13:24:53,287 INFO [Time-limited test {}] client.ConnectionUtils(128): master/c7c455b68129:0 server-side Connection retries=45 2024-12-07T13:24:53,287 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-07T13:24:53,287 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-07T13:24:53,287 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-07T13:24:53,287 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-07T13:24:53,287 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-07T13:24:53,288 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-12-07T13:24:53,288 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-07T13:24:53,289 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:46593 2024-12-07T13:24:53,290 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:46593 connecting to ZooKeeper ensemble=127.0.0.1:55405 2024-12-07T13:24:53,359 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:465930x0, quorum=127.0.0.1:55405, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-07T13:24:53,360 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:46593-0x10000736c570000 connected 2024-12-07T13:24:53,460 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T13:24:53,462 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting 
call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T13:24:53,465 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:46593-0x10000736c570000, quorum=127.0.0.1:55405, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-07T13:24:53,465 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084, hbase.cluster.distributed=false 2024-12-07T13:24:53,466 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:46593-0x10000736c570000, quorum=127.0.0.1:55405, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-07T13:24:53,467 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=46593 2024-12-07T13:24:53,467 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=46593 2024-12-07T13:24:53,467 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=46593 2024-12-07T13:24:53,467 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=46593 2024-12-07T13:24:53,468 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=46593 2024-12-07T13:24:53,481 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/c7c455b68129:0 server-side Connection retries=45 2024-12-07T13:24:53,481 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-07T13:24:53,481 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-07T13:24:53,481 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-07T13:24:53,481 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-07T13:24:53,481 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-07T13:24:53,481 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-07T13:24:53,481 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-07T13:24:53,482 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:43699 2024-12-07T13:24:53,483 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:43699 connecting to ZooKeeper ensemble=127.0.0.1:55405 2024-12-07T13:24:53,484 INFO [Time-limited test {}] 
fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T13:24:53,486 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T13:24:53,496 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:436990x0, quorum=127.0.0.1:55405, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-07T13:24:53,497 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:436990x0, quorum=127.0.0.1:55405, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-07T13:24:53,497 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:43699-0x10000736c570001 connected 2024-12-07T13:24:53,497 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-07T13:24:53,498 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-07T13:24:53,498 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:43699-0x10000736c570001, quorum=127.0.0.1:55405, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-07T13:24:53,499 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:43699-0x10000736c570001, quorum=127.0.0.1:55405, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-07T13:24:53,500 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=43699 2024-12-07T13:24:53,501 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=43699 2024-12-07T13:24:53,501 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=43699 2024-12-07T13:24:53,504 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=43699 2024-12-07T13:24:53,504 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=43699 2024-12-07T13:24:53,514 DEBUG [M:0;c7c455b68129:46593 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;c7c455b68129:46593 2024-12-07T13:24:53,515 INFO [master/c7c455b68129:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/c7c455b68129,46593,1733577893287 2024-12-07T13:24:53,522 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43699-0x10000736c570001, quorum=127.0.0.1:55405, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-07T13:24:53,522 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46593-0x10000736c570000, quorum=127.0.0.1:55405, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-07T13:24:53,523 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:46593-0x10000736c570000, quorum=127.0.0.1:55405, baseZNode=/hbase Set 
watcher on existing znode=/hbase/backup-masters/c7c455b68129,46593,1733577893287 2024-12-07T13:24:53,533 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43699-0x10000736c570001, quorum=127.0.0.1:55405, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-07T13:24:53,533 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46593-0x10000736c570000, quorum=127.0.0.1:55405, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T13:24:53,533 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43699-0x10000736c570001, quorum=127.0.0.1:55405, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T13:24:53,533 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:46593-0x10000736c570000, quorum=127.0.0.1:55405, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-07T13:24:53,534 INFO [master/c7c455b68129:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/c7c455b68129,46593,1733577893287 from backup master directory 2024-12-07T13:24:53,543 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43699-0x10000736c570001, quorum=127.0.0.1:55405, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-07T13:24:53,543 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46593-0x10000736c570000, quorum=127.0.0.1:55405, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/c7c455b68129,46593,1733577893287 2024-12-07T13:24:53,543 WARN [master/c7c455b68129:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
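The ZKWatcher/ZKUtil lines above show the master publishing itself under /hbase/backup-masters, registering watches on znodes such as /hbase/master and /hbase/running (including znodes that do not exist yet), and then deleting its backup znode once it wins mastership. The "Set watcher on znode that does not yet exist" messages are ZooKeeper's standard exists-with-watch pattern. A hedged sketch of that pattern with the plain Apache ZooKeeper client follows (HBase's internal ZKWatcher/ZKUtil wrappers are not reproduced; the quorum string is taken from the log, the session timeout is an assumption):

    // Hedged sketch of the exists-with-watch pattern behind the
    // "Set watcher on znode that does not yet exist" / "Received ZooKeeper Event"
    // lines, using the plain ZooKeeper client rather than HBase's ZKWatcher.
    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooKeeper;

    public class RunningZNodeWatchSketch {
      public static void main(String[] args) throws Exception {
        Watcher watcher = event ->
            // Fires with NodeCreated once the active master creates /hbase/running.
            System.out.println("ZK event " + event.getType() + " on " + event.getPath());

        // Quorum string from the log; 30000 ms session timeout is an assumption.
        ZooKeeper zk = new ZooKeeper("127.0.0.1:55405", 30000, watcher);

        // exists() registers a one-shot watch even when the znode is absent,
        // which is what ZKUtil logs as "Set watcher on znode that does not yet exist".
        if (zk.exists("/hbase/running", true) == null) {
          System.out.println("/hbase/running not created yet; watch registered");
        }
        Thread.sleep(10_000);  // in this sketch, just wait for the callback
        zk.close();
      }
    }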
2024-12-07T13:24:53,543 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46593-0x10000736c570000, quorum=127.0.0.1:55405, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-07T13:24:53,543 INFO [master/c7c455b68129:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=c7c455b68129,46593,1733577893287 2024-12-07T13:24:53,547 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/hbase.id] with ID: d3971f90-1103-4443-adfb-8b98c7b45d46 2024-12-07T13:24:53,547 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/.tmp/hbase.id 2024-12-07T13:24:53,553 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40395 is added to blk_1073741826_1002 (size=42) 2024-12-07T13:24:53,553 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41375 is added to blk_1073741826_1002 (size=42) 2024-12-07T13:24:53,554 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/.tmp/hbase.id]:[hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/hbase.id] 2024-12-07T13:24:53,567 INFO [master/c7c455b68129:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T13:24:53,567 INFO [master/c7c455b68129:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-12-07T13:24:53,568 INFO [master/c7c455b68129:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
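The FSUtils lines above show the new active master writing the cluster ID to a temporary location (.tmp/hbase.id) and then moving it to its final name, the usual write-then-rename pattern so that readers never observe a partially written hbase.id. A hedged sketch of that pattern with the stock Hadoop FileSystem API (HBase's FSUtils helpers are not reproduced; the NameNode port and paths are copied from the log for illustration only):

    // Hedged sketch of the write-to-temp-then-rename pattern behind the
    // FSUtils(620/625/634) lines; stock Hadoop FileSystem API, illustrative paths.
    import java.io.IOException;
    import java.nio.charset.StandardCharsets;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class ClusterIdFileSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        conf.set("fs.defaultFS", "hdfs://localhost:35785");   // NameNode port from the log
        FileSystem fs = FileSystem.get(conf);

        Path rootDir = new Path("/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084");
        Path tmp = new Path(rootDir, ".tmp/hbase.id");
        Path target = new Path(rootDir, "hbase.id");

        // 1. Write the cluster ID to a temporary location first ...
        try (FSDataOutputStream out = fs.create(tmp, true)) {
          out.write("d3971f90-1103-4443-adfb-8b98c7b45d46".getBytes(StandardCharsets.UTF_8));
        }
        // 2. ... then rename it into place so readers never see a partial file.
        if (!fs.rename(tmp, target)) {
          throw new IOException("rename " + tmp + " -> " + target + " failed");
        }
        fs.close();
      }
    }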
2024-12-07T13:24:53,581 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46593-0x10000736c570000, quorum=127.0.0.1:55405, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T13:24:53,581 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43699-0x10000736c570001, quorum=127.0.0.1:55405, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T13:24:53,588 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41375 is added to blk_1073741827_1003 (size=196) 2024-12-07T13:24:53,588 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40395 is added to blk_1073741827_1003 (size=196) 2024-12-07T13:24:53,589 INFO [master/c7c455b68129:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-07T13:24:53,590 INFO [master/c7c455b68129:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-07T13:24:53,590 INFO [master/c7c455b68129:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-07T13:24:53,599 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41375 is added to blk_1073741828_1004 (size=1189) 2024-12-07T13:24:53,599 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40395 is added to blk_1073741828_1004 (size=1189) 2024-12-07T13:24:53,601 INFO [master/c7c455b68129:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/MasterData/data/master/store 2024-12-07T13:24:53,608 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40395 is added to blk_1073741829_1005 (size=34) 2024-12-07T13:24:53,608 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41375 is added to blk_1073741829_1005 (size=34) 2024-12-07T13:24:53,609 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T13:24:53,609 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-07T13:24:53,609 INFO [master/c7c455b68129:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-07T13:24:53,609 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-07T13:24:53,609 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-07T13:24:53,609 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-07T13:24:53,609 INFO [master/c7c455b68129:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
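The MasterRegion lines above describe the local 'master:store' table: four column families (info, proc, rs, state), with 'info' kept in memory, 3 versions, 8 KB blocks, ROW_INDEX_V1 encoding and a ROWCOL bloom filter, while the other families use the defaults (1 version, 64 KB blocks, ROW bloom filter, no encoding). The master builds this descriptor internally; as a hedged illustration only, the same attributes expressed with the public client builders would look roughly like:

    // Hedged sketch: rebuilding the 'master:store' descriptor printed above with
    // the public TableDescriptorBuilder/ColumnFamilyDescriptorBuilder API.
    // The 'info' family is spelled out; proc/rs/state keep the logged defaults.
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MasterStoreDescriptorSketch {
      public static void main(String[] args) {
        TableDescriptor masterStore = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("master", "store"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder
                .newBuilder(Bytes.toBytes("info"))
                .setMaxVersions(3)
                .setInMemory(true)
                .setBlocksize(8 * 1024)                                // 8 KB blocks
                .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
                .setBloomFilterType(BloomType.ROWCOL)
                .build())
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("proc"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("rs"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("state"))
            .build();
        System.out.println(masterStore);
      }
    }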
2024-12-07T13:24:53,609 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733577893609Disabling compacts and flushes for region at 1733577893609Disabling writes for close at 1733577893609Writing region close event to WAL at 1733577893609Closed at 1733577893609 2024-12-07T13:24:53,610 WARN [master/c7c455b68129:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/MasterData/data/master/store/.initializing 2024-12-07T13:24:53,610 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/MasterData/WALs/c7c455b68129,46593,1733577893287 2024-12-07T13:24:53,613 INFO [master/c7c455b68129:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=c7c455b68129%2C46593%2C1733577893287, suffix=, logDir=hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/MasterData/WALs/c7c455b68129,46593,1733577893287, archiveDir=hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/MasterData/oldWALs, maxLogs=10 2024-12-07T13:24:53,614 INFO [master/c7c455b68129:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor c7c455b68129%2C46593%2C1733577893287.1733577893614 2024-12-07T13:24:53,620 INFO [master/c7c455b68129:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/MasterData/WALs/c7c455b68129,46593,1733577893287/c7c455b68129%2C46593%2C1733577893287.1733577893614 2024-12-07T13:24:53,625 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44111:44111),(127.0.0.1/127.0.0.1:33821:33821)] 2024-12-07T13:24:53,625 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-07T13:24:53,625 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T13:24:53,626 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-07T13:24:53,626 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-07T13:24:53,627 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-07T13:24:53,629 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-07T13:24:53,629 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T13:24:53,629 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T13:24:53,630 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-07T13:24:53,631 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-07T13:24:53,631 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T13:24:53,631 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T13:24:53,631 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-07T13:24:53,633 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-07T13:24:53,633 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T13:24:53,633 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T13:24:53,633 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-07T13:24:53,634 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-07T13:24:53,634 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T13:24:53,635 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T13:24:53,635 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-07T13:24:53,636 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-07T13:24:53,636 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-07T13:24:53,638 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-07T13:24:53,638 DEBUG [master/c7c455b68129:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-07T13:24:53,638 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-07T13:24:53,639 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-07T13:24:53,642 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-07T13:24:53,642 INFO [master/c7c455b68129:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=856398, jitterRate=0.08896656334400177}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-07T13:24:53,644 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733577893626Initializing all the Stores at 1733577893627 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733577893627Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733577893627Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733577893627Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733577893627Cleaning up temporary data from old regions at 1733577893638 (+11 ms)Region opened successfully at 1733577893644 (+6 ms) 2024-12-07T13:24:53,644 INFO [master/c7c455b68129:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-07T13:24:53,648 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@79ccd9db, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=c7c455b68129/172.17.0.3:0 2024-12-07T13:24:53,648 INFO [master/c7c455b68129:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-12-07T13:24:53,649 INFO [master/c7c455b68129:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-07T13:24:53,649 INFO [master/c7c455b68129:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-07T13:24:53,649 INFO [master/c7c455b68129:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-07T13:24:53,649 INFO [master/c7c455b68129:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-12-07T13:24:53,650 INFO [master/c7c455b68129:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-12-07T13:24:53,650 INFO [master/c7c455b68129:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-07T13:24:53,652 INFO [master/c7c455b68129:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-07T13:24:53,653 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46593-0x10000736c570000, quorum=127.0.0.1:55405, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-07T13:24:53,665 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-07T13:24:53,665 INFO [master/c7c455b68129:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-07T13:24:53,666 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46593-0x10000736c570000, quorum=127.0.0.1:55405, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-07T13:24:53,675 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-07T13:24:53,676 INFO [master/c7c455b68129:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-07T13:24:53,678 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46593-0x10000736c570000, quorum=127.0.0.1:55405, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-07T13:24:53,691 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-07T13:24:53,694 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46593-0x10000736c570000, quorum=127.0.0.1:55405, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-07T13:24:53,707 DEBUG 
[master/c7c455b68129:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-07T13:24:53,709 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46593-0x10000736c570000, quorum=127.0.0.1:55405, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-07T13:24:53,717 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-07T13:24:53,728 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43699-0x10000736c570001, quorum=127.0.0.1:55405, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-07T13:24:53,728 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46593-0x10000736c570000, quorum=127.0.0.1:55405, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-07T13:24:53,728 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46593-0x10000736c570000, quorum=127.0.0.1:55405, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T13:24:53,728 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43699-0x10000736c570001, quorum=127.0.0.1:55405, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T13:24:53,729 INFO [master/c7c455b68129:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=c7c455b68129,46593,1733577893287, sessionid=0x10000736c570000, setting cluster-up flag (Was=false) 2024-12-07T13:24:53,749 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43699-0x10000736c570001, quorum=127.0.0.1:55405, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T13:24:53,749 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46593-0x10000736c570000, quorum=127.0.0.1:55405, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T13:24:53,781 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-07T13:24:53,782 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=c7c455b68129,46593,1733577893287 2024-12-07T13:24:53,807 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43699-0x10000736c570001, quorum=127.0.0.1:55405, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T13:24:53,807 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46593-0x10000736c570000, quorum=127.0.0.1:55405, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T13:24:53,838 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-07T13:24:53,842 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=c7c455b68129,46593,1733577893287 2024-12-07T13:24:53,845 INFO [master/c7c455b68129:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-07T13:24:53,849 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-07T13:24:53,849 INFO [master/c7c455b68129:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-07T13:24:53,849 INFO [master/c7c455b68129:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-12-07T13:24:53,850 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: c7c455b68129,46593,1733577893287 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-07T13:24:53,852 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/c7c455b68129:0, corePoolSize=5, maxPoolSize=5 2024-12-07T13:24:53,852 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/c7c455b68129:0, corePoolSize=5, maxPoolSize=5 2024-12-07T13:24:53,852 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/c7c455b68129:0, corePoolSize=5, maxPoolSize=5 2024-12-07T13:24:53,853 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/c7c455b68129:0, corePoolSize=5, maxPoolSize=5 2024-12-07T13:24:53,853 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/c7c455b68129:0, corePoolSize=10, maxPoolSize=10 2024-12-07T13:24:53,853 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/c7c455b68129:0, corePoolSize=1, maxPoolSize=1 2024-12-07T13:24:53,853 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/c7c455b68129:0, corePoolSize=2, maxPoolSize=2 2024-12-07T13:24:53,853 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/c7c455b68129:0, corePoolSize=1, 
maxPoolSize=1 2024-12-07T13:24:53,854 INFO [master/c7c455b68129:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733577923854 2024-12-07T13:24:53,854 INFO [master/c7c455b68129:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-07T13:24:53,854 INFO [master/c7c455b68129:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-07T13:24:53,854 INFO [master/c7c455b68129:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-07T13:24:53,854 INFO [master/c7c455b68129:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-07T13:24:53,854 INFO [master/c7c455b68129:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-07T13:24:53,854 INFO [master/c7c455b68129:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-07T13:24:53,855 INFO [master/c7c455b68129:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-07T13:24:53,855 INFO [master/c7c455b68129:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-07T13:24:53,855 INFO [master/c7c455b68129:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-07T13:24:53,855 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-07T13:24:53,855 INFO [master/c7c455b68129:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-07T13:24:53,855 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-07T13:24:53,856 INFO [master/c7c455b68129:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-07T13:24:53,856 INFO [master/c7c455b68129:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-07T13:24:53,856 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/c7c455b68129:0:becomeActiveMaster-HFileCleaner.large.0-1733577893856,5,FailOnTimeoutGroup] 2024-12-07T13:24:53,856 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/c7c455b68129:0:becomeActiveMaster-HFileCleaner.small.0-1733577893856,5,FailOnTimeoutGroup] 2024-12-07T13:24:53,857 INFO [master/c7c455b68129:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-07T13:24:53,857 INFO [master/c7c455b68129:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. 
Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-07T13:24:53,857 INFO [master/c7c455b68129:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-07T13:24:53,857 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T13:24:53,857 INFO [master/c7c455b68129:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-12-07T13:24:53,857 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-07T13:24:53,865 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40395 is added to blk_1073741831_1007 (size=1321) 2024-12-07T13:24:53,865 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41375 is added to blk_1073741831_1007 (size=1321) 2024-12-07T13:24:53,866 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-07T13:24:53,866 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084 2024-12-07T13:24:53,874 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40395 is added to blk_1073741832_1008 (size=32) 2024-12-07T13:24:53,875 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41375 is added to blk_1073741832_1008 (size=32) 2024-12-07T13:24:53,875 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T13:24:53,877 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-07T13:24:53,878 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-07T13:24:53,878 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T13:24:53,879 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T13:24:53,879 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, 
prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-07T13:24:53,880 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-07T13:24:53,880 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T13:24:53,881 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T13:24:53,881 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-07T13:24:53,882 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-07T13:24:53,882 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T13:24:53,883 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T13:24:53,883 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-07T13:24:53,884 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to 
compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-07T13:24:53,884 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T13:24:53,885 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T13:24:53,885 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-07T13:24:53,886 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/data/hbase/meta/1588230740 2024-12-07T13:24:53,886 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/data/hbase/meta/1588230740 2024-12-07T13:24:53,888 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-07T13:24:53,888 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-07T13:24:53,888 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
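
[The FlushLargeStoresPolicy entries above fall back to memstoreFlushSize divided by the number of families because hbase.hregion.percolumnfamilyflush.size.lower.bound is not set in the table descriptor. A minimal sketch of setting that key on a descriptor, assuming the standard HBase client TableDescriptorBuilder API; the table name "demo", family "info", and the 16 MB value are illustrative only, not taken from this test run.]

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class FlushLowerBoundSketch {
      public static void main(String[] args) {
        // Set the per-column-family flush lower bound (key name copied from the log
        // line above) so FlushLargeStoresPolicy does not fall back to
        // memstoreFlushSize / number-of-families.
        TableDescriptor td = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("demo"))              // illustrative table name
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
            .setValue("hbase.hregion.percolumnfamilyflush.size.lower.bound",
                String.valueOf(16L * 1024 * 1024))              // illustrative 16 MB bound
            .build();
        System.out.println(td);
      }
    }
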
2024-12-07T13:24:53,890 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-07T13:24:53,892 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-07T13:24:53,892 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=713269, jitterRate=-0.09303165972232819}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-07T13:24:53,894 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733577893875Initializing all the Stores at 1733577893876 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733577893876Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733577893876Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733577893876Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733577893877 (+1 ms)Cleaning up temporary data from old regions at 1733577893888 (+11 ms)Region opened successfully at 1733577893894 (+6 ms) 2024-12-07T13:24:53,894 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-07T13:24:53,894 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-07T13:24:53,894 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-07T13:24:53,894 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-07T13:24:53,894 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-07T13:24:53,894 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-07T13:24:53,894 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733577893894Disabling compacts and flushes for region at 1733577893894Disabling writes for close at 1733577893894Writing 
region close event to WAL at 1733577893894Closed at 1733577893894 2024-12-07T13:24:53,896 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-07T13:24:53,896 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-07T13:24:53,896 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-07T13:24:53,898 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-07T13:24:53,899 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-07T13:24:53,906 INFO [RS:0;c7c455b68129:43699 {}] regionserver.HRegionServer(746): ClusterId : d3971f90-1103-4443-adfb-8b98c7b45d46 2024-12-07T13:24:53,906 DEBUG [RS:0;c7c455b68129:43699 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-07T13:24:53,934 DEBUG [RS:0;c7c455b68129:43699 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-07T13:24:53,934 DEBUG [RS:0;c7c455b68129:43699 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-07T13:24:53,944 DEBUG [RS:0;c7c455b68129:43699 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-07T13:24:53,945 DEBUG [RS:0;c7c455b68129:43699 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5e14b47d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=c7c455b68129/172.17.0.3:0 2024-12-07T13:24:53,955 DEBUG [RS:0;c7c455b68129:43699 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;c7c455b68129:43699 2024-12-07T13:24:53,955 INFO [RS:0;c7c455b68129:43699 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-07T13:24:53,955 INFO [RS:0;c7c455b68129:43699 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-07T13:24:53,955 DEBUG [RS:0;c7c455b68129:43699 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-12-07T13:24:53,956 INFO [RS:0;c7c455b68129:43699 {}] regionserver.HRegionServer(2659): reportForDuty to master=c7c455b68129,46593,1733577893287 with port=43699, startcode=1733577893481 2024-12-07T13:24:53,956 DEBUG [RS:0;c7c455b68129:43699 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-07T13:24:53,958 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:46281, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=RegionServerStatusService 2024-12-07T13:24:53,959 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46593 {}] master.ServerManager(363): Checking decommissioned status of RegionServer c7c455b68129,43699,1733577893481 2024-12-07T13:24:53,959 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46593 {}] master.ServerManager(517): Registering regionserver=c7c455b68129,43699,1733577893481 2024-12-07T13:24:53,960 DEBUG [RS:0;c7c455b68129:43699 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084 2024-12-07T13:24:53,960 DEBUG [RS:0;c7c455b68129:43699 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:35785 2024-12-07T13:24:53,961 DEBUG [RS:0;c7c455b68129:43699 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-07T13:24:53,970 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46593-0x10000736c570000, quorum=127.0.0.1:55405, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-07T13:24:53,971 DEBUG [RS:0;c7c455b68129:43699 {}] zookeeper.ZKUtil(111): regionserver:43699-0x10000736c570001, quorum=127.0.0.1:55405, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/c7c455b68129,43699,1733577893481 2024-12-07T13:24:53,971 WARN [RS:0;c7c455b68129:43699 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-07T13:24:53,971 INFO [RS:0;c7c455b68129:43699 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-07T13:24:53,971 DEBUG [RS:0;c7c455b68129:43699 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,43699,1733577893481 2024-12-07T13:24:53,971 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [c7c455b68129,43699,1733577893481] 2024-12-07T13:24:53,975 INFO [RS:0;c7c455b68129:43699 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-07T13:24:53,977 INFO [RS:0;c7c455b68129:43699 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-07T13:24:53,977 INFO [RS:0;c7c455b68129:43699 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-07T13:24:53,977 INFO [RS:0;c7c455b68129:43699 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
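
[The PressureAwareCompactionThroughputController entry above reports a 100 MB/s higher bound and 50 MB/s lower bound. A minimal sketch of overriding those bounds in configuration; the key names here are assumptions based on the controller's documented settings and may differ between HBase versions, and the byte values are illustrative.]

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionThroughputSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Assumed key name; corresponds to "higher bound: 100.00 MB/second" in the log.
        conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024);
        // Assumed key name; corresponds to "lower bound 50.00 MB/second".
        conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);
        System.out.println(conf.get("hbase.hstore.compaction.throughput.higher.bound"));
      }
    }
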
2024-12-07T13:24:53,977 INFO [RS:0;c7c455b68129:43699 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-07T13:24:53,978 INFO [RS:0;c7c455b68129:43699 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-07T13:24:53,978 INFO [RS:0;c7c455b68129:43699 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-07T13:24:53,978 DEBUG [RS:0;c7c455b68129:43699 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/c7c455b68129:0, corePoolSize=1, maxPoolSize=1 2024-12-07T13:24:53,978 DEBUG [RS:0;c7c455b68129:43699 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/c7c455b68129:0, corePoolSize=1, maxPoolSize=1 2024-12-07T13:24:53,978 DEBUG [RS:0;c7c455b68129:43699 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/c7c455b68129:0, corePoolSize=1, maxPoolSize=1 2024-12-07T13:24:53,979 DEBUG [RS:0;c7c455b68129:43699 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/c7c455b68129:0, corePoolSize=1, maxPoolSize=1 2024-12-07T13:24:53,979 DEBUG [RS:0;c7c455b68129:43699 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/c7c455b68129:0, corePoolSize=1, maxPoolSize=1 2024-12-07T13:24:53,979 DEBUG [RS:0;c7c455b68129:43699 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/c7c455b68129:0, corePoolSize=2, maxPoolSize=2 2024-12-07T13:24:53,979 DEBUG [RS:0;c7c455b68129:43699 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/c7c455b68129:0, corePoolSize=1, maxPoolSize=1 2024-12-07T13:24:53,979 DEBUG [RS:0;c7c455b68129:43699 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/c7c455b68129:0, corePoolSize=1, maxPoolSize=1 2024-12-07T13:24:53,979 DEBUG [RS:0;c7c455b68129:43699 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/c7c455b68129:0, corePoolSize=1, maxPoolSize=1 2024-12-07T13:24:53,979 DEBUG [RS:0;c7c455b68129:43699 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/c7c455b68129:0, corePoolSize=1, maxPoolSize=1 2024-12-07T13:24:53,979 DEBUG [RS:0;c7c455b68129:43699 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/c7c455b68129:0, corePoolSize=1, maxPoolSize=1 2024-12-07T13:24:53,979 DEBUG [RS:0;c7c455b68129:43699 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/c7c455b68129:0, corePoolSize=1, maxPoolSize=1 2024-12-07T13:24:53,979 DEBUG [RS:0;c7c455b68129:43699 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/c7c455b68129:0, corePoolSize=3, maxPoolSize=3 2024-12-07T13:24:53,979 DEBUG [RS:0;c7c455b68129:43699 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/c7c455b68129:0, corePoolSize=3, maxPoolSize=3 2024-12-07T13:24:53,980 INFO [RS:0;c7c455b68129:43699 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
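
[The executor.ExecutorService entries above list a corePoolSize/maxPoolSize pair for each internal region-server pool. As a plain-JDK illustration of what those two numbers mean (generic java.util.concurrent usage, not HBase's own ExecutorService wrapper):]

    import java.util.concurrent.LinkedBlockingQueue;
    import java.util.concurrent.ThreadPoolExecutor;
    import java.util.concurrent.TimeUnit;

    public class PoolSizeSketch {
      public static void main(String[] args) {
        // corePoolSize=1, maxPoolSize=1 mirrors pools like RS_OPEN_REGION above:
        // a single worker thread; additional tasks wait in the queue.
        ThreadPoolExecutor pool = new ThreadPoolExecutor(
            1, 1, 60L, TimeUnit.SECONDS, new LinkedBlockingQueue<>());
        pool.allowCoreThreadTimeOut(true); // let an idle core thread exit, as some pools above allow
        pool.submit(() -> System.out.println("task ran on " + Thread.currentThread().getName()));
        pool.shutdown();
      }
    }
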
2024-12-07T13:24:53,980 INFO [RS:0;c7c455b68129:43699 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-07T13:24:53,980 INFO [RS:0;c7c455b68129:43699 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-07T13:24:53,980 INFO [RS:0;c7c455b68129:43699 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-07T13:24:53,980 INFO [RS:0;c7c455b68129:43699 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-07T13:24:53,980 INFO [RS:0;c7c455b68129:43699 {}] hbase.ChoreService(168): Chore ScheduledChore name=c7c455b68129,43699,1733577893481-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-07T13:24:53,993 INFO [RS:0;c7c455b68129:43699 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-07T13:24:53,993 INFO [RS:0;c7c455b68129:43699 {}] hbase.ChoreService(168): Chore ScheduledChore name=c7c455b68129,43699,1733577893481-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-07T13:24:53,993 INFO [RS:0;c7c455b68129:43699 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-07T13:24:53,993 INFO [RS:0;c7c455b68129:43699 {}] regionserver.Replication(171): c7c455b68129,43699,1733577893481 started 2024-12-07T13:24:54,006 INFO [RS:0;c7c455b68129:43699 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-07T13:24:54,006 INFO [RS:0;c7c455b68129:43699 {}] regionserver.HRegionServer(1482): Serving as c7c455b68129,43699,1733577893481, RpcServer on c7c455b68129/172.17.0.3:43699, sessionid=0x10000736c570001 2024-12-07T13:24:54,006 DEBUG [RS:0;c7c455b68129:43699 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-07T13:24:54,006 DEBUG [RS:0;c7c455b68129:43699 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager c7c455b68129,43699,1733577893481 2024-12-07T13:24:54,006 DEBUG [RS:0;c7c455b68129:43699 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'c7c455b68129,43699,1733577893481' 2024-12-07T13:24:54,006 DEBUG [RS:0;c7c455b68129:43699 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-07T13:24:54,007 DEBUG [RS:0;c7c455b68129:43699 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-07T13:24:54,007 DEBUG [RS:0;c7c455b68129:43699 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-07T13:24:54,007 DEBUG [RS:0;c7c455b68129:43699 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-07T13:24:54,007 DEBUG [RS:0;c7c455b68129:43699 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager c7c455b68129,43699,1733577893481 2024-12-07T13:24:54,007 DEBUG [RS:0;c7c455b68129:43699 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'c7c455b68129,43699,1733577893481' 2024-12-07T13:24:54,007 DEBUG [RS:0;c7c455b68129:43699 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-07T13:24:54,008 DEBUG 
[RS:0;c7c455b68129:43699 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-07T13:24:54,008 DEBUG [RS:0;c7c455b68129:43699 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-07T13:24:54,008 INFO [RS:0;c7c455b68129:43699 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-07T13:24:54,008 INFO [RS:0;c7c455b68129:43699 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-07T13:24:54,049 WARN [c7c455b68129:46593 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-12-07T13:24:54,111 INFO [RS:0;c7c455b68129:43699 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=c7c455b68129%2C43699%2C1733577893481, suffix=, logDir=hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,43699,1733577893481, archiveDir=hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/oldWALs, maxLogs=32 2024-12-07T13:24:54,113 INFO [RS:0;c7c455b68129:43699 {}] monitor.StreamSlowMonitor(122): New stream slow monitor c7c455b68129%2C43699%2C1733577893481.1733577894113 2024-12-07T13:24:54,122 INFO [RS:0;c7c455b68129:43699 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,43699,1733577893481/c7c455b68129%2C43699%2C1733577893481.1733577894113 2024-12-07T13:24:54,123 DEBUG [RS:0;c7c455b68129:43699 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44111:44111),(127.0.0.1/127.0.0.1:33821:33821)] 2024-12-07T13:24:54,300 DEBUG [c7c455b68129:46593 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-12-07T13:24:54,300 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=c7c455b68129,43699,1733577893481 2024-12-07T13:24:54,302 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as c7c455b68129,43699,1733577893481, state=OPENING 2024-12-07T13:24:54,312 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-07T13:24:54,322 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43699-0x10000736c570001, quorum=127.0.0.1:55405, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T13:24:54,322 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46593-0x10000736c570000, quorum=127.0.0.1:55405, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T13:24:54,324 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-07T13:24:54,324 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-07T13:24:54,324 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-07T13:24:54,324 INFO 
[PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=c7c455b68129,43699,1733577893481}] 2024-12-07T13:24:54,482 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-07T13:24:54,487 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:37505, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-07T13:24:54,494 INFO [RS_OPEN_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-07T13:24:54,494 INFO [RS_OPEN_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-07T13:24:54,497 INFO [RS_OPEN_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=c7c455b68129%2C43699%2C1733577893481.meta, suffix=.meta, logDir=hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,43699,1733577893481, archiveDir=hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/oldWALs, maxLogs=32 2024-12-07T13:24:54,498 INFO [RS_OPEN_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor c7c455b68129%2C43699%2C1733577893481.meta.1733577894498.meta 2024-12-07T13:24:54,505 INFO [RS_OPEN_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,43699,1733577893481/c7c455b68129%2C43699%2C1733577893481.meta.1733577894498.meta 2024-12-07T13:24:54,506 DEBUG [RS_OPEN_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44111:44111),(127.0.0.1/127.0.0.1:33821:33821)] 2024-12-07T13:24:54,507 DEBUG [RS_OPEN_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-07T13:24:54,507 DEBUG [RS_OPEN_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-07T13:24:54,508 DEBUG [RS_OPEN_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-07T13:24:54,508 INFO [RS_OPEN_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
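
[The meta-open entries above show org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint being loaded from the hbase:meta table descriptor. A minimal sketch of how a coprocessor endpoint is attached to a descriptor by class name, assuming the standard TableDescriptorBuilder API; the table name and family are illustrative.]

    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class CoprocessorAttachSketch {
      public static void main(String[] args) throws IOException {
        TableDescriptor td = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("demo"))              // illustrative table name
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
            // Same endpoint class the log shows being loaded for hbase:meta.
            .setCoprocessor("org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint")
            .build();
        System.out.println(td.getCoprocessorDescriptors());
      }
    }
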
2024-12-07T13:24:54,508 DEBUG [RS_OPEN_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-07T13:24:54,508 DEBUG [RS_OPEN_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T13:24:54,508 DEBUG [RS_OPEN_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-07T13:24:54,508 DEBUG [RS_OPEN_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-07T13:24:54,510 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-07T13:24:54,512 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-07T13:24:54,512 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T13:24:54,513 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T13:24:54,513 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-07T13:24:54,515 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-07T13:24:54,515 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T13:24:54,515 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T13:24:54,515 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-07T13:24:54,516 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-07T13:24:54,517 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T13:24:54,517 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T13:24:54,517 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-07T13:24:54,518 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-07T13:24:54,518 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T13:24:54,519 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-12-07T13:24:54,519 DEBUG [RS_OPEN_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-07T13:24:54,520 DEBUG [RS_OPEN_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/data/hbase/meta/1588230740 2024-12-07T13:24:54,521 DEBUG [RS_OPEN_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/data/hbase/meta/1588230740 2024-12-07T13:24:54,522 DEBUG [RS_OPEN_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-07T13:24:54,522 DEBUG [RS_OPEN_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-07T13:24:54,522 DEBUG [RS_OPEN_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-07T13:24:54,524 DEBUG [RS_OPEN_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-07T13:24:54,524 INFO [RS_OPEN_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=738873, jitterRate=-0.0604751855134964}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-07T13:24:54,525 DEBUG [RS_OPEN_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-07T13:24:54,525 DEBUG [RS_OPEN_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733577894509Writing region info on filesystem at 1733577894509Initializing all the Stores at 1733577894510 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733577894510Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733577894510Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 
'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733577894510Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733577894510Cleaning up temporary data from old regions at 1733577894522 (+12 ms)Running coprocessor post-open hooks at 1733577894525 (+3 ms)Region opened successfully at 1733577894525 2024-12-07T13:24:54,526 INFO [RS_OPEN_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733577894481 2024-12-07T13:24:54,528 DEBUG [RS_OPEN_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-07T13:24:54,528 INFO [RS_OPEN_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-07T13:24:54,529 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=c7c455b68129,43699,1733577893481 2024-12-07T13:24:54,530 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as c7c455b68129,43699,1733577893481, state=OPEN 2024-12-07T13:24:54,659 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46593-0x10000736c570000, quorum=127.0.0.1:55405, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-07T13:24:54,659 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43699-0x10000736c570001, quorum=127.0.0.1:55405, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-07T13:24:54,659 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=c7c455b68129,43699,1733577893481 2024-12-07T13:24:54,659 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-07T13:24:54,659 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-07T13:24:54,665 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-07T13:24:54,665 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=c7c455b68129,43699,1733577893481 in 335 msec 2024-12-07T13:24:54,669 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-07T13:24:54,669 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 769 msec 2024-12-07T13:24:54,670 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, 
state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-07T13:24:54,670 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-07T13:24:54,671 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-07T13:24:54,672 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=c7c455b68129,43699,1733577893481, seqNum=-1] 2024-12-07T13:24:54,672 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T13:24:54,673 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:52217, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T13:24:54,680 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 832 msec 2024-12-07T13:24:54,680 INFO [master/c7c455b68129:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733577894680, completionTime=-1 2024-12-07T13:24:54,681 INFO [master/c7c455b68129:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-12-07T13:24:54,681 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-12-07T13:24:54,683 INFO [master/c7c455b68129:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-12-07T13:24:54,683 INFO [master/c7c455b68129:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733577954683 2024-12-07T13:24:54,683 INFO [master/c7c455b68129:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733578014683 2024-12-07T13:24:54,683 INFO [master/c7c455b68129:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec 2024-12-07T13:24:54,683 INFO [master/c7c455b68129:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c7c455b68129,46593,1733577893287-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-07T13:24:54,684 INFO [master/c7c455b68129:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c7c455b68129,46593,1733577893287-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-07T13:24:54,684 INFO [master/c7c455b68129:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c7c455b68129,46593,1733577893287-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-07T13:24:54,684 INFO [master/c7c455b68129:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-c7c455b68129:46593, period=300000, unit=MILLISECONDS is enabled. 
2024-12-07T13:24:54,684 INFO [master/c7c455b68129:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-07T13:24:54,684 INFO [master/c7c455b68129:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-12-07T13:24:54,686 DEBUG [master/c7c455b68129:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-07T13:24:54,687 INFO [master/c7c455b68129:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.144sec 2024-12-07T13:24:54,688 INFO [master/c7c455b68129:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-07T13:24:54,688 INFO [master/c7c455b68129:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-07T13:24:54,688 INFO [master/c7c455b68129:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-07T13:24:54,688 INFO [master/c7c455b68129:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-12-07T13:24:54,688 INFO [master/c7c455b68129:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-07T13:24:54,688 INFO [master/c7c455b68129:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c7c455b68129,46593,1733577893287-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-07T13:24:54,688 INFO [master/c7c455b68129:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c7c455b68129,46593,1733577893287-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-07T13:24:54,690 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-07T13:24:54,690 INFO [master/c7c455b68129:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-07T13:24:54,690 INFO [master/c7c455b68129:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c7c455b68129,46593,1733577893287-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
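At this point the master has reported "completed initialization" and that the single registered RegionServer checked in. A hedged sketch of confirming the same thing from the client side follows; Admin.getClusterMetrics(), ClusterMetrics.getMasterName() and getLiveServerMetrics() are the public HBase client API, while the connection boilerplate around them is illustrative and not taken from this test.

```java
// Hedged sketch: verifying what the master just logged (cluster initialized,
// one live RegionServer) through the public Admin/ClusterMetrics API.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.ClusterMetrics;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class ClusterStatusSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      ClusterMetrics metrics = admin.getClusterMetrics();
      System.out.println("active master : " + metrics.getMasterName());
      System.out.println("live servers  : " + metrics.getLiveServerMetrics().size());
    }
  }
}
```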
2024-12-07T13:24:54,707 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@52eb1c40, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T13:24:54,707 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request c7c455b68129,46593,-1 for getting cluster id 2024-12-07T13:24:54,707 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-07T13:24:54,708 DEBUG [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'd3971f90-1103-4443-adfb-8b98c7b45d46' 2024-12-07T13:24:54,709 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-07T13:24:54,709 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "d3971f90-1103-4443-adfb-8b98c7b45d46" 2024-12-07T13:24:54,709 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@9ae7cad, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T13:24:54,709 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [c7c455b68129,46593,-1] 2024-12-07T13:24:54,709 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-07T13:24:54,710 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T13:24:54,711 INFO [HMaster-EventLoopGroup-7-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:39326, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-07T13:24:54,712 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@21e18edd, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T13:24:54,713 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-07T13:24:54,714 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=c7c455b68129,43699,1733577893481, seqNum=-1] 2024-12-07T13:24:54,714 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T13:24:54,716 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:35604, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T13:24:54,718 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=c7c455b68129,46593,1733577893287 2024-12-07T13:24:54,719 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T13:24:54,722 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-12-07T13:24:54,737 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/c7c455b68129:0 server-side Connection retries=45 2024-12-07T13:24:54,737 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-07T13:24:54,737 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-07T13:24:54,737 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-07T13:24:54,737 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-07T13:24:54,738 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-07T13:24:54,738 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-07T13:24:54,738 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-07T13:24:54,738 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:42863 2024-12-07T13:24:54,739 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:42863 connecting to ZooKeeper ensemble=127.0.0.1:55405 2024-12-07T13:24:54,740 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T13:24:54,741 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T13:24:54,764 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:428630x0, quorum=127.0.0.1:55405, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-07T13:24:54,765 DEBUG [Time-limited test {}] zookeeper.ZKUtil(111): regionserver:428630x0, quorum=127.0.0.1:55405, baseZNode=/hbase Set watcher on existing znode=/hbase/running 2024-12-07T13:24:54,765 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:42863-0x10000736c570002 connected 2024-12-07T13:24:54,765 DEBUG [pool-381-thread-1 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: INIT 2024-12-07T13:24:54,766 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-07T13:24:54,767 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-07T13:24:54,768 DEBUG 
[Time-limited test {}] zookeeper.ZKUtil(111): regionserver:42863-0x10000736c570002, quorum=127.0.0.1:55405, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-07T13:24:54,770 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:42863-0x10000736c570002, quorum=127.0.0.1:55405, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-07T13:24:54,771 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=42863 2024-12-07T13:24:54,771 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=42863 2024-12-07T13:24:54,771 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=42863 2024-12-07T13:24:54,772 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=42863 2024-12-07T13:24:54,772 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=42863 2024-12-07T13:24:54,774 INFO [RS:1;c7c455b68129:42863 {}] regionserver.HRegionServer(746): ClusterId : d3971f90-1103-4443-adfb-8b98c7b45d46 2024-12-07T13:24:54,774 DEBUG [RS:1;c7c455b68129:42863 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-07T13:24:54,786 DEBUG [RS:1;c7c455b68129:42863 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-07T13:24:54,786 DEBUG [RS:1;c7c455b68129:42863 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-07T13:24:54,797 DEBUG [RS:1;c7c455b68129:42863 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-07T13:24:54,798 DEBUG [RS:1;c7c455b68129:42863 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5baad5ee, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=c7c455b68129/172.17.0.3:0 2024-12-07T13:24:54,812 DEBUG [RS:1;c7c455b68129:42863 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;c7c455b68129:42863 2024-12-07T13:24:54,812 INFO [RS:1;c7c455b68129:42863 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-07T13:24:54,812 INFO [RS:1;c7c455b68129:42863 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-07T13:24:54,812 DEBUG [RS:1;c7c455b68129:42863 {}] regionserver.HRegionServer(832): About to register with Master. 
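The Time-limited test thread is bringing up a second RegionServer (RS:1 on port 42863) inside the mini cluster: it connects to ZooKeeper, installs its RPC executors, and reports for duty to the master just below. A rough sketch of how a test typically adds another RegionServer is shown here; it assumes the 2.x-era HBaseTestingUtility/MiniHBaseCluster names, and the HBaseTestingUtil used in this 3.0.0-beta-2 run may expose the equivalent calls slightly differently.

```java
// Hedged sketch: adding a second RegionServer to a mini cluster from a test.
// Assumes the 2.x-style HBaseTestingUtility/MiniHBaseCluster API; names may
// differ in the 3.0 HBaseTestingUtil seen in this log.
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.MiniHBaseCluster;

public class SecondRegionServerSketch {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtility testUtil = new HBaseTestingUtility();
    testUtil.startMiniCluster(1);                 // one master, one RegionServer to begin with
    MiniHBaseCluster cluster = testUtil.getMiniHBaseCluster();
    cluster.startRegionServer();                  // spins up RS:1, as logged above
    // ... run the test against the two-RS cluster ...
    testUtil.shutdownMiniCluster();
  }
}
```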
2024-12-07T13:24:54,812 INFO [RS:1;c7c455b68129:42863 {}] regionserver.HRegionServer(2659): reportForDuty to master=c7c455b68129,46593,1733577893287 with port=42863, startcode=1733577894737 2024-12-07T13:24:54,812 DEBUG [RS:1;c7c455b68129:42863 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-07T13:24:54,814 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:46429, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.3 (auth:SIMPLE), service=RegionServerStatusService 2024-12-07T13:24:54,814 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46593 {}] master.ServerManager(363): Checking decommissioned status of RegionServer c7c455b68129,42863,1733577894737 2024-12-07T13:24:54,814 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46593 {}] master.ServerManager(517): Registering regionserver=c7c455b68129,42863,1733577894737 2024-12-07T13:24:54,816 DEBUG [RS:1;c7c455b68129:42863 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084 2024-12-07T13:24:54,816 DEBUG [RS:1;c7c455b68129:42863 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:35785 2024-12-07T13:24:54,816 DEBUG [RS:1;c7c455b68129:42863 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-07T13:24:54,827 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46593-0x10000736c570000, quorum=127.0.0.1:55405, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-07T13:24:54,828 DEBUG [RS:1;c7c455b68129:42863 {}] zookeeper.ZKUtil(111): regionserver:42863-0x10000736c570002, quorum=127.0.0.1:55405, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/c7c455b68129,42863,1733577894737 2024-12-07T13:24:54,828 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [c7c455b68129,42863,1733577894737] 2024-12-07T13:24:54,828 WARN [RS:1;c7c455b68129:42863 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-07T13:24:54,828 INFO [RS:1;c7c455b68129:42863 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-07T13:24:54,828 DEBUG [RS:1;c7c455b68129:42863 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,42863,1733577894737 2024-12-07T13:24:54,832 INFO [RS:1;c7c455b68129:42863 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-07T13:24:54,833 INFO [RS:1;c7c455b68129:42863 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-07T13:24:54,833 INFO [RS:1;c7c455b68129:42863 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-07T13:24:54,833 INFO [RS:1;c7c455b68129:42863 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
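The new RegionServer logs its compaction throughput limits (higher bound 100.00 MB/second, lower bound 50.00 MB/second, tuning period 60000 ms). For reference only, the keys that PressureAwareCompactionThroughputController is assumed to read these bounds from are sketched below; treating the logged numbers as coming from exactly these keys is my interpretation, and the values shown just restate the log in bytes per second.

```java
// Hedged sketch: configuration keys assumed to back the compaction throughput
// bounds logged above (100 MB/s and 50 MB/s expressed in bytes per second).
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionThroughputSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024);
    conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);
    conf.setInt("hbase.hstore.compaction.throughput.tune.period", 60_000);
    System.out.println(conf.get("hbase.hstore.compaction.throughput.higher.bound"));
  }
}
```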
2024-12-07T13:24:54,833 INFO [RS:1;c7c455b68129:42863 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-07T13:24:54,834 INFO [RS:1;c7c455b68129:42863 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-07T13:24:54,834 INFO [RS:1;c7c455b68129:42863 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-07T13:24:54,835 DEBUG [RS:1;c7c455b68129:42863 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/c7c455b68129:0, corePoolSize=1, maxPoolSize=1 2024-12-07T13:24:54,835 DEBUG [RS:1;c7c455b68129:42863 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/c7c455b68129:0, corePoolSize=1, maxPoolSize=1 2024-12-07T13:24:54,835 DEBUG [RS:1;c7c455b68129:42863 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/c7c455b68129:0, corePoolSize=1, maxPoolSize=1 2024-12-07T13:24:54,835 DEBUG [RS:1;c7c455b68129:42863 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/c7c455b68129:0, corePoolSize=1, maxPoolSize=1 2024-12-07T13:24:54,835 DEBUG [RS:1;c7c455b68129:42863 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/c7c455b68129:0, corePoolSize=1, maxPoolSize=1 2024-12-07T13:24:54,835 DEBUG [RS:1;c7c455b68129:42863 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/c7c455b68129:0, corePoolSize=2, maxPoolSize=2 2024-12-07T13:24:54,835 DEBUG [RS:1;c7c455b68129:42863 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/c7c455b68129:0, corePoolSize=1, maxPoolSize=1 2024-12-07T13:24:54,835 DEBUG [RS:1;c7c455b68129:42863 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/c7c455b68129:0, corePoolSize=1, maxPoolSize=1 2024-12-07T13:24:54,835 DEBUG [RS:1;c7c455b68129:42863 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/c7c455b68129:0, corePoolSize=1, maxPoolSize=1 2024-12-07T13:24:54,835 DEBUG [RS:1;c7c455b68129:42863 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/c7c455b68129:0, corePoolSize=1, maxPoolSize=1 2024-12-07T13:24:54,835 DEBUG [RS:1;c7c455b68129:42863 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/c7c455b68129:0, corePoolSize=1, maxPoolSize=1 2024-12-07T13:24:54,836 DEBUG [RS:1;c7c455b68129:42863 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/c7c455b68129:0, corePoolSize=1, maxPoolSize=1 2024-12-07T13:24:54,836 DEBUG [RS:1;c7c455b68129:42863 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/c7c455b68129:0, corePoolSize=3, maxPoolSize=3 2024-12-07T13:24:54,836 DEBUG [RS:1;c7c455b68129:42863 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/c7c455b68129:0, corePoolSize=3, maxPoolSize=3 2024-12-07T13:24:54,837 INFO [RS:1;c7c455b68129:42863 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-12-07T13:24:54,837 INFO [RS:1;c7c455b68129:42863 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-07T13:24:54,837 INFO [RS:1;c7c455b68129:42863 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-07T13:24:54,837 INFO [RS:1;c7c455b68129:42863 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-07T13:24:54,837 INFO [RS:1;c7c455b68129:42863 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-07T13:24:54,837 INFO [RS:1;c7c455b68129:42863 {}] hbase.ChoreService(168): Chore ScheduledChore name=c7c455b68129,42863,1733577894737-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-07T13:24:54,851 INFO [RS:1;c7c455b68129:42863 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-07T13:24:54,851 INFO [RS:1;c7c455b68129:42863 {}] hbase.ChoreService(168): Chore ScheduledChore name=c7c455b68129,42863,1733577894737-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-07T13:24:54,851 INFO [RS:1;c7c455b68129:42863 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-07T13:24:54,851 INFO [RS:1;c7c455b68129:42863 {}] regionserver.Replication(171): c7c455b68129,42863,1733577894737 started 2024-12-07T13:24:54,863 INFO [RS:1;c7c455b68129:42863 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-07T13:24:54,863 INFO [RS:1;c7c455b68129:42863 {}] regionserver.HRegionServer(1482): Serving as c7c455b68129,42863,1733577894737, RpcServer on c7c455b68129/172.17.0.3:42863, sessionid=0x10000736c570002 2024-12-07T13:24:54,863 DEBUG [RS:1;c7c455b68129:42863 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-07T13:24:54,863 INFO [Time-limited test {}] hbase.HBaseTestingUtil(2882): Started new server=Thread[RS:1;c7c455b68129:42863,5,FailOnTimeoutGroup] 2024-12-07T13:24:54,863 DEBUG [RS:1;c7c455b68129:42863 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager c7c455b68129,42863,1733577894737 2024-12-07T13:24:54,863 DEBUG [RS:1;c7c455b68129:42863 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'c7c455b68129,42863,1733577894737' 2024-12-07T13:24:54,863 DEBUG [RS:1;c7c455b68129:42863 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-07T13:24:54,863 INFO [Time-limited test {}] wal.TestLogRolling(207): Replication=2 2024-12-07T13:24:54,864 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-12-07T13:24:54,864 DEBUG [RS:1;c7c455b68129:42863 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-07T13:24:54,864 DEBUG [RS:1;c7c455b68129:42863 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-07T13:24:54,864 DEBUG [RS:1;c7c455b68129:42863 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-07T13:24:54,864 DEBUG [RS:1;c7c455b68129:42863 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 
c7c455b68129,42863,1733577894737 2024-12-07T13:24:54,864 DEBUG [RS:1;c7c455b68129:42863 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'c7c455b68129,42863,1733577894737' 2024-12-07T13:24:54,864 DEBUG [RS:1;c7c455b68129:42863 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-07T13:24:54,865 DEBUG [RS:1;c7c455b68129:42863 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-07T13:24:54,865 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.AsyncConnectionImpl(321): The fetched master address is c7c455b68129,46593,1733577893287 2024-12-07T13:24:54,865 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@e7fd738 2024-12-07T13:24:54,865 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-07T13:24:54,865 DEBUG [RS:1;c7c455b68129:42863 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-07T13:24:54,866 INFO [RS:1;c7c455b68129:42863 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-07T13:24:54,866 INFO [RS:1;c7c455b68129:42863 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-07T13:24:54,867 INFO [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:39340, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-07T13:24:54,867 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46593 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-12-07T13:24:54,867 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46593 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
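The TableDescriptorChecker warnings above and the create request logged next come from the test building a deliberately small table (max file size 786432 bytes, memstore flush size 8192 bytes) so that flushes, rolls and splits trigger quickly. A minimal sketch of issuing such a create through the public client API follows; the descriptor values are copied from the log, while the connection boilerplate around them is illustrative rather than the test's actual code.

```java
// Hedged sketch: creating a table like the one requested below via the public
// Admin API. The tiny MAX_FILESIZE / MEMSTORE_FLUSHSIZE values are exactly the
// ones the TableDescriptorChecker warns about above.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateTestTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableDescriptor td = TableDescriptorBuilder
          .newBuilder(TableName.valueOf("TestLogRolling-testLogRollOnDatanodeDeath"))
          .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
              .setMaxVersions(1)
              .build())
          .setMaxFileSize(786432L)       // triggers the MAX_FILESIZE warning above
          .setMemStoreFlushSize(8192L)   // triggers the MEMSTORE_FLUSHSIZE warning above
          .build();
      admin.createTable(td);
    }
  }
}
```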
2024-12-07T13:24:54,868 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46593 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.3 create 'TestLogRolling-testLogRollOnDatanodeDeath', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-07T13:24:54,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46593 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath 2024-12-07T13:24:54,870 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_PRE_OPERATION 2024-12-07T13:24:54,870 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T13:24:54,871 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46593 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.3 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRollOnDatanodeDeath" procId is: 4 2024-12-07T13:24:54,872 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-07T13:24:54,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46593 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-07T13:24:54,881 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41375 is added to blk_1073741835_1011 (size=393) 2024-12-07T13:24:54,881 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40395 is added to blk_1073741835_1011 (size=393) 2024-12-07T13:24:54,883 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => e6098b8ab71fc33e054cc7633f0ee629, NAME => 'TestLogRolling-testLogRollOnDatanodeDeath,,1733577894867.e6098b8ab71fc33e054cc7633f0ee629.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRollOnDatanodeDeath', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084 2024-12-07T13:24:54,889 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41375 is added to blk_1073741836_1012 (size=76) 2024-12-07T13:24:54,889 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40395 is added to blk_1073741836_1012 (size=76) 2024-12-07T13:24:54,890 DEBUG 
[RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnDatanodeDeath,,1733577894867.e6098b8ab71fc33e054cc7633f0ee629.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T13:24:54,890 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1722): Closing e6098b8ab71fc33e054cc7633f0ee629, disabling compactions & flushes 2024-12-07T13:24:54,890 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnDatanodeDeath,,1733577894867.e6098b8ab71fc33e054cc7633f0ee629. 2024-12-07T13:24:54,890 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1733577894867.e6098b8ab71fc33e054cc7633f0ee629. 2024-12-07T13:24:54,890 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1733577894867.e6098b8ab71fc33e054cc7633f0ee629. after waiting 0 ms 2024-12-07T13:24:54,890 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnDatanodeDeath,,1733577894867.e6098b8ab71fc33e054cc7633f0ee629. 2024-12-07T13:24:54,890 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1733577894867.e6098b8ab71fc33e054cc7633f0ee629. 2024-12-07T13:24:54,890 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1676): Region close journal for e6098b8ab71fc33e054cc7633f0ee629: Waiting for close lock at 1733577894890Disabling compacts and flushes for region at 1733577894890Disabling writes for close at 1733577894890Writing region close event to WAL at 1733577894890Closed at 1733577894890 2024-12-07T13:24:54,892 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_ADD_TO_META 2024-12-07T13:24:54,892 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRollOnDatanodeDeath,,1733577894867.e6098b8ab71fc33e054cc7633f0ee629.","families":{"info":[{"qualifier":"regioninfo","vlen":75,"tag":[],"timestamp":"1733577894892"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733577894892"}]},"ts":"1733577894892"} 2024-12-07T13:24:54,894 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
2024-12-07T13:24:54,895 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-07T13:24:54,895 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnDatanodeDeath","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733577894895"}]},"ts":"1733577894895"} 2024-12-07T13:24:54,898 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnDatanodeDeath, state=ENABLING in hbase:meta 2024-12-07T13:24:54,898 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=e6098b8ab71fc33e054cc7633f0ee629, ASSIGN}] 2024-12-07T13:24:54,899 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=e6098b8ab71fc33e054cc7633f0ee629, ASSIGN 2024-12-07T13:24:54,901 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=e6098b8ab71fc33e054cc7633f0ee629, ASSIGN; state=OFFLINE, location=c7c455b68129,43699,1733577893481; forceNewPlan=false, retain=false 2024-12-07T13:24:54,970 INFO [RS:1;c7c455b68129:42863 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=c7c455b68129%2C42863%2C1733577894737, suffix=, logDir=hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,42863,1733577894737, archiveDir=hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/oldWALs, maxLogs=32 2024-12-07T13:24:54,972 INFO [RS:1;c7c455b68129:42863 {}] monitor.StreamSlowMonitor(122): New stream slow monitor c7c455b68129%2C42863%2C1733577894737.1733577894972 2024-12-07T13:24:54,981 INFO [RS:1;c7c455b68129:42863 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,42863,1733577894737/c7c455b68129%2C42863%2C1733577894737.1733577894972 2024-12-07T13:24:54,984 DEBUG [RS:1;c7c455b68129:42863 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33821:33821),(127.0.0.1/127.0.0.1:44111:44111)] 2024-12-07T13:24:55,052 INFO [c7c455b68129:46593 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 
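The new RegionServer's WAL is configured with blocksize=256 MB, rollsize=128 MB and maxLogs=32; the roll size is the WAL block size scaled by the log-roll multiplier (256 MB × 0.5 = 128 MB). The sketch below names the configuration keys this derivation normally comes from; reading the logged values back onto these keys is my interpretation, and the defaults shown simply echo the log.

```java
// Hedged sketch: keys assumed to produce the WAL configuration line above
// (rollsize = hlog blocksize * logroll multiplier; maxLogs caps the WAL count).
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class WalRollSizeSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    long blockSize = conf.getLong("hbase.regionserver.hlog.blocksize", 256L * 1024 * 1024);
    float multiplier = conf.getFloat("hbase.regionserver.logroll.multiplier", 0.5f);
    int maxLogs = conf.getInt("hbase.regionserver.maxlogs", 32);
    long rollSize = (long) (blockSize * multiplier);
    System.out.printf("blocksize=%d, rollsize=%d, maxLogs=%d%n", blockSize, rollSize, maxLogs);
  }
}
```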
2024-12-07T13:24:55,052 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=e6098b8ab71fc33e054cc7633f0ee629, regionState=OPENING, regionLocation=c7c455b68129,43699,1733577893481 2024-12-07T13:24:55,058 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-8-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=e6098b8ab71fc33e054cc7633f0ee629, ASSIGN because future has completed 2024-12-07T13:24:55,059 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure e6098b8ab71fc33e054cc7633f0ee629, server=c7c455b68129,43699,1733577893481}] 2024-12-07T13:24:55,223 INFO [RS_OPEN_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRollOnDatanodeDeath,,1733577894867.e6098b8ab71fc33e054cc7633f0ee629. 2024-12-07T13:24:55,224 DEBUG [RS_OPEN_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => e6098b8ab71fc33e054cc7633f0ee629, NAME => 'TestLogRolling-testLogRollOnDatanodeDeath,,1733577894867.e6098b8ab71fc33e054cc7633f0ee629.', STARTKEY => '', ENDKEY => ''} 2024-12-07T13:24:55,224 DEBUG [RS_OPEN_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRollOnDatanodeDeath e6098b8ab71fc33e054cc7633f0ee629 2024-12-07T13:24:55,225 DEBUG [RS_OPEN_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnDatanodeDeath,,1733577894867.e6098b8ab71fc33e054cc7633f0ee629.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T13:24:55,225 DEBUG [RS_OPEN_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for e6098b8ab71fc33e054cc7633f0ee629 2024-12-07T13:24:55,225 DEBUG [RS_OPEN_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for e6098b8ab71fc33e054cc7633f0ee629 2024-12-07T13:24:55,227 INFO [StoreOpener-e6098b8ab71fc33e054cc7633f0ee629-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region e6098b8ab71fc33e054cc7633f0ee629 2024-12-07T13:24:55,229 INFO [StoreOpener-e6098b8ab71fc33e054cc7633f0ee629-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region e6098b8ab71fc33e054cc7633f0ee629 columnFamilyName info 2024-12-07T13:24:55,229 DEBUG [StoreOpener-e6098b8ab71fc33e054cc7633f0ee629-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T13:24:55,229 INFO [StoreOpener-e6098b8ab71fc33e054cc7633f0ee629-1 {}] regionserver.HStore(327): Store=e6098b8ab71fc33e054cc7633f0ee629/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T13:24:55,230 DEBUG [RS_OPEN_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for e6098b8ab71fc33e054cc7633f0ee629 2024-12-07T13:24:55,231 DEBUG [RS_OPEN_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e6098b8ab71fc33e054cc7633f0ee629 2024-12-07T13:24:55,231 DEBUG [RS_OPEN_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e6098b8ab71fc33e054cc7633f0ee629 2024-12-07T13:24:55,232 DEBUG [RS_OPEN_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for e6098b8ab71fc33e054cc7633f0ee629 2024-12-07T13:24:55,232 DEBUG [RS_OPEN_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for e6098b8ab71fc33e054cc7633f0ee629 2024-12-07T13:24:55,234 DEBUG [RS_OPEN_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for e6098b8ab71fc33e054cc7633f0ee629 2024-12-07T13:24:55,237 DEBUG [RS_OPEN_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e6098b8ab71fc33e054cc7633f0ee629/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-07T13:24:55,238 INFO [RS_OPEN_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened e6098b8ab71fc33e054cc7633f0ee629; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=779586, jitterRate=-0.0087052583694458}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-07T13:24:55,238 DEBUG [RS_OPEN_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for e6098b8ab71fc33e054cc7633f0ee629 2024-12-07T13:24:55,239 DEBUG [RS_OPEN_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for e6098b8ab71fc33e054cc7633f0ee629: Running coprocessor pre-open hook at 1733577895225Writing region info on filesystem at 1733577895225Initializing all the Stores at 1733577895226 (+1 ms)Instantiating 
store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733577895226Cleaning up temporary data from old regions at 1733577895232 (+6 ms)Running coprocessor post-open hooks at 1733577895238 (+6 ms)Region opened successfully at 1733577895239 (+1 ms) 2024-12-07T13:24:55,240 INFO [RS_OPEN_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRollOnDatanodeDeath,,1733577894867.e6098b8ab71fc33e054cc7633f0ee629., pid=6, masterSystemTime=1733577895214 2024-12-07T13:24:55,243 DEBUG [RS_OPEN_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRollOnDatanodeDeath,,1733577894867.e6098b8ab71fc33e054cc7633f0ee629. 2024-12-07T13:24:55,244 INFO [RS_OPEN_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRollOnDatanodeDeath,,1733577894867.e6098b8ab71fc33e054cc7633f0ee629. 2024-12-07T13:24:55,245 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=e6098b8ab71fc33e054cc7633f0ee629, regionState=OPEN, openSeqNum=2, regionLocation=c7c455b68129,43699,1733577893481 2024-12-07T13:24:55,248 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-8-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure e6098b8ab71fc33e054cc7633f0ee629, server=c7c455b68129,43699,1733577893481 because future has completed 2024-12-07T13:24:55,255 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-12-07T13:24:55,255 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure e6098b8ab71fc33e054cc7633f0ee629, server=c7c455b68129,43699,1733577893481 in 192 msec 2024-12-07T13:24:55,258 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-12-07T13:24:55,258 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=e6098b8ab71fc33e054cc7633f0ee629, ASSIGN in 357 msec 2024-12-07T13:24:55,260 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-07T13:24:55,260 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnDatanodeDeath","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733577895260"}]},"ts":"1733577895260"} 2024-12-07T13:24:55,262 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnDatanodeDeath, state=ENABLED in hbase:meta 2024-12-07T13:24:55,264 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure 
table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_POST_OPERATION 2024-12-07T13:24:55,266 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath in 396 msec 2024-12-07T13:24:55,743 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-07T13:24:55,748 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T13:24:55,762 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T13:24:55,763 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T13:24:55,764 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T13:24:58,427 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-12-07T13:24:58,427 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-12-07T13:24:58,430 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath 2024-12-07T13:24:58,430 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath Metrics about Tables on a single HBase RegionServer 2024-12-07T13:24:58,431 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-07T13:24:58,431 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-12-07T13:24:58,432 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-12-07T13:24:58,432 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-12-07T13:24:59,975 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRollOnDatanodeDeath' 2024-12-07T13:25:00,728 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-07T13:25:00,731 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T13:25:00,748 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T13:25:00,750 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T13:25:00,751 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T13:25:04,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46593 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-07T13:25:04,918 INFO [RPCClient-NioEventLoopGroup-4-11 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRollOnDatanodeDeath completed 2024-12-07T13:25:04,919 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRollOnDatanodeDeath,, stopping at row=TestLogRolling-testLogRollOnDatanodeDeath ,, for max=2147483647 with caching=100 2024-12-07T13:25:04,927 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRollOnDatanodeDeath 2024-12-07T13:25:04,928 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRollOnDatanodeDeath,,1733577894867.e6098b8ab71fc33e054cc7633f0ee629. 2024-12-07T13:25:04,941 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-07T13:25:04,944 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-07T13:25:04,945 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-07T13:25:04,945 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-07T13:25:04,945 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-07T13:25:04,946 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@45d50f98{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3a4fca0d-a22f-07a3-d286-965b16cc613d/hadoop.log.dir/,AVAILABLE} 2024-12-07T13:25:04,946 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5cac6b83{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-07T13:25:05,036 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@15751333{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3a4fca0d-a22f-07a3-d286-965b16cc613d/java.io.tmpdir/jetty-localhost-46321-hadoop-hdfs-3_4_1-tests_jar-_-any-285277414132770412/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-07T13:25:05,037 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@184b7b23{HTTP/1.1, (http/1.1)}{localhost:46321} 2024-12-07T13:25:05,037 INFO [Time-limited test {}] server.Server(415): Started @122877ms 2024-12-07T13:25:05,038 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-07T13:25:05,065 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-07T13:25:05,068 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-07T13:25:05,069 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-07T13:25:05,069 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-07T13:25:05,069 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-07T13:25:05,070 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7ed35b1e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3a4fca0d-a22f-07a3-d286-965b16cc613d/hadoop.log.dir/,AVAILABLE} 2024-12-07T13:25:05,070 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3eb2cdd3{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-07T13:25:05,164 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@49584ab0{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3a4fca0d-a22f-07a3-d286-965b16cc613d/java.io.tmpdir/jetty-localhost-40609-hadoop-hdfs-3_4_1-tests_jar-_-any-16011374566770785487/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-07T13:25:05,164 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@59d0863f{HTTP/1.1, (http/1.1)}{localhost:40609} 2024-12-07T13:25:05,164 INFO [Time-limited test {}] server.Server(415): Started @123005ms 2024-12-07T13:25:05,166 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-07T13:25:05,200 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-07T13:25:05,202 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-07T13:25:05,203 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-07T13:25:05,203 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-07T13:25:05,203 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-07T13:25:05,204 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5455501c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3a4fca0d-a22f-07a3-d286-965b16cc613d/hadoop.log.dir/,AVAILABLE} 2024-12-07T13:25:05,204 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@264a677b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-07T13:25:05,297 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@33f262d4{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3a4fca0d-a22f-07a3-d286-965b16cc613d/java.io.tmpdir/jetty-localhost-46265-hadoop-hdfs-3_4_1-tests_jar-_-any-3307975981934979721/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-07T13:25:05,297 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@44692c68{HTTP/1.1, (http/1.1)}{localhost:46265} 2024-12-07T13:25:05,297 INFO [Time-limited test {}] server.Server(415): Started @123138ms 2024-12-07T13:25:05,299 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-07T13:25:06,960 WARN [Thread-867 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3a4fca0d-a22f-07a3-d286-965b16cc613d/cluster_3b40d539-567c-d4e1-b985-613e875fc041/data/data5/current/BP-959501690-172.17.0.3-1733577891118/current, will proceed with Du for space computation calculation, 2024-12-07T13:25:06,960 WARN [Thread-868 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3a4fca0d-a22f-07a3-d286-965b16cc613d/cluster_3b40d539-567c-d4e1-b985-613e875fc041/data/data6/current/BP-959501690-172.17.0.3-1733577891118/current, will proceed with Du for space computation calculation, 2024-12-07T13:25:06,979 WARN [Thread-808 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-07T13:25:06,983 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x694695171c5dd52e with lease ID 0xea89a762f7bb5347: Processing first storage report for DS-317dae2a-1dd6-4115-b7da-799a141a3dbf from datanode DatanodeRegistration(127.0.0.1:43971, datanodeUuid=75be824e-cc7b-46b3-a1bc-5cae5177bdc9, infoPort=34539, infoSecurePort=0, ipcPort=43667, storageInfo=lv=-57;cid=testClusterID;nsid=2132482547;c=1733577891118) 2024-12-07T13:25:06,983 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x694695171c5dd52e with lease ID 0xea89a762f7bb5347: from storage DS-317dae2a-1dd6-4115-b7da-799a141a3dbf node DatanodeRegistration(127.0.0.1:43971, datanodeUuid=75be824e-cc7b-46b3-a1bc-5cae5177bdc9, infoPort=34539, infoSecurePort=0, ipcPort=43667, storageInfo=lv=-57;cid=testClusterID;nsid=2132482547;c=1733577891118), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-07T13:25:06,983 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x694695171c5dd52e with lease ID 0xea89a762f7bb5347: Processing first storage report for DS-b6678d7b-2096-4e51-bb31-d3511da4e823 from datanode DatanodeRegistration(127.0.0.1:43971, datanodeUuid=75be824e-cc7b-46b3-a1bc-5cae5177bdc9, infoPort=34539, infoSecurePort=0, ipcPort=43667, storageInfo=lv=-57;cid=testClusterID;nsid=2132482547;c=1733577891118) 2024-12-07T13:25:06,983 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x694695171c5dd52e with lease ID 0xea89a762f7bb5347: from storage DS-b6678d7b-2096-4e51-bb31-d3511da4e823 node DatanodeRegistration(127.0.0.1:43971, datanodeUuid=75be824e-cc7b-46b3-a1bc-5cae5177bdc9, infoPort=34539, infoSecurePort=0, ipcPort=43667, storageInfo=lv=-57;cid=testClusterID;nsid=2132482547;c=1733577891118), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-07T13:25:07,070 WARN [Thread-878 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3a4fca0d-a22f-07a3-d286-965b16cc613d/cluster_3b40d539-567c-d4e1-b985-613e875fc041/data/data7/current/BP-959501690-172.17.0.3-1733577891118/current, will proceed with Du for space computation calculation, 2024-12-07T13:25:07,070 WARN [Thread-879 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3a4fca0d-a22f-07a3-d286-965b16cc613d/cluster_3b40d539-567c-d4e1-b985-613e875fc041/data/data8/current/BP-959501690-172.17.0.3-1733577891118/current, will proceed with Du for space computation calculation, 2024-12-07T13:25:07,085 WARN [Thread-830 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-07T13:25:07,087 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x821a2aa262317cde with lease ID 0xea89a762f7bb5348: Processing first storage report for DS-ce9ff6f4-6fc6-4a51-89c6-edb968514e60 from datanode DatanodeRegistration(127.0.0.1:41655, datanodeUuid=0d78caf8-0f38-428f-9cf0-01e5ac9a9d07, infoPort=36089, infoSecurePort=0, ipcPort=39509, storageInfo=lv=-57;cid=testClusterID;nsid=2132482547;c=1733577891118) 2024-12-07T13:25:07,087 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x821a2aa262317cde with lease ID 0xea89a762f7bb5348: from storage DS-ce9ff6f4-6fc6-4a51-89c6-edb968514e60 node DatanodeRegistration(127.0.0.1:41655, datanodeUuid=0d78caf8-0f38-428f-9cf0-01e5ac9a9d07, infoPort=36089, infoSecurePort=0, ipcPort=39509, storageInfo=lv=-57;cid=testClusterID;nsid=2132482547;c=1733577891118), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-07T13:25:07,087 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x821a2aa262317cde with lease ID 0xea89a762f7bb5348: Processing first storage report for DS-b6c9032c-9e40-4840-9134-3811cfaa9ede from datanode DatanodeRegistration(127.0.0.1:41655, datanodeUuid=0d78caf8-0f38-428f-9cf0-01e5ac9a9d07, infoPort=36089, infoSecurePort=0, ipcPort=39509, storageInfo=lv=-57;cid=testClusterID;nsid=2132482547;c=1733577891118) 2024-12-07T13:25:07,087 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x821a2aa262317cde with lease ID 0xea89a762f7bb5348: from storage DS-b6c9032c-9e40-4840-9134-3811cfaa9ede node DatanodeRegistration(127.0.0.1:41655, datanodeUuid=0d78caf8-0f38-428f-9cf0-01e5ac9a9d07, infoPort=36089, infoSecurePort=0, ipcPort=39509, storageInfo=lv=-57;cid=testClusterID;nsid=2132482547;c=1733577891118), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-07T13:25:07,126 WARN [Thread-889 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3a4fca0d-a22f-07a3-d286-965b16cc613d/cluster_3b40d539-567c-d4e1-b985-613e875fc041/data/data9/current/BP-959501690-172.17.0.3-1733577891118/current, will proceed with Du for space computation calculation, 2024-12-07T13:25:07,126 WARN [Thread-890 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3a4fca0d-a22f-07a3-d286-965b16cc613d/cluster_3b40d539-567c-d4e1-b985-613e875fc041/data/data10/current/BP-959501690-172.17.0.3-1733577891118/current, will proceed with Du for space computation calculation, 2024-12-07T13:25:07,145 WARN [Thread-852 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-07T13:25:07,147 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xfd2e754cf3c359e5 with lease ID 0xea89a762f7bb5349: Processing first storage report for DS-362498b2-c630-41dd-a062-e31650fa79e9 from datanode DatanodeRegistration(127.0.0.1:40487, datanodeUuid=e7e8cc2e-cd70-40eb-8c2c-1622627d1944, infoPort=45283, infoSecurePort=0, ipcPort=38267, storageInfo=lv=-57;cid=testClusterID;nsid=2132482547;c=1733577891118) 2024-12-07T13:25:07,147 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xfd2e754cf3c359e5 with lease ID 0xea89a762f7bb5349: from storage DS-362498b2-c630-41dd-a062-e31650fa79e9 node DatanodeRegistration(127.0.0.1:40487, datanodeUuid=e7e8cc2e-cd70-40eb-8c2c-1622627d1944, infoPort=45283, infoSecurePort=0, ipcPort=38267, storageInfo=lv=-57;cid=testClusterID;nsid=2132482547;c=1733577891118), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-07T13:25:07,147 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xfd2e754cf3c359e5 with lease ID 0xea89a762f7bb5349: Processing first storage report for DS-aa5fef15-0319-4d2f-8d54-e9e44a26cebb from datanode DatanodeRegistration(127.0.0.1:40487, datanodeUuid=e7e8cc2e-cd70-40eb-8c2c-1622627d1944, infoPort=45283, infoSecurePort=0, ipcPort=38267, storageInfo=lv=-57;cid=testClusterID;nsid=2132482547;c=1733577891118) 2024-12-07T13:25:07,147 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xfd2e754cf3c359e5 with lease ID 0xea89a762f7bb5349: from storage DS-aa5fef15-0319-4d2f-8d54-e9e44a26cebb node DatanodeRegistration(127.0.0.1:40487, datanodeUuid=e7e8cc2e-cd70-40eb-8c2c-1622627d1944, infoPort=45283, infoSecurePort=0, ipcPort=38267, storageInfo=lv=-57;cid=testClusterID;nsid=2132482547;c=1733577891118), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-07T13:25:07,248 WARN [ResponseProcessor for block BP-959501690-172.17.0.3-1733577891118:blk_1073741837_1013 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-959501690-172.17.0.3-1733577891118:blk_1073741837_1013 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T13:25:07,249 WARN [ResponseProcessor for block BP-959501690-172.17.0.3-1733577891118:blk_1073741830_1006 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-959501690-172.17.0.3-1733577891118:blk_1073741830_1006 java.io.IOException: Bad response ERROR for BP-959501690-172.17.0.3-1733577891118:blk_1073741830_1006 from datanode DatanodeInfoWithStorage[127.0.0.1:40395,DS-bc541ee7-02ba-4fbd-8aa8-6b25f00a10bd,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-07T13:25:07,249 WARN [ResponseProcessor for block BP-959501690-172.17.0.3-1733577891118:blk_1073741833_1009 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-959501690-172.17.0.3-1733577891118:blk_1073741833_1009 java.io.IOException: Bad response ERROR for BP-959501690-172.17.0.3-1733577891118:blk_1073741833_1009 from datanode DatanodeInfoWithStorage[127.0.0.1:40395,DS-bc541ee7-02ba-4fbd-8aa8-6b25f00a10bd,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T13:25:07,249 WARN [ResponseProcessor for block BP-959501690-172.17.0.3-1733577891118:blk_1073741834_1010 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-959501690-172.17.0.3-1733577891118:blk_1073741834_1010 java.io.IOException: Bad response ERROR for BP-959501690-172.17.0.3-1733577891118:blk_1073741834_1010 from datanode DatanodeInfoWithStorage[127.0.0.1:40395,DS-bc541ee7-02ba-4fbd-8aa8-6b25f00a10bd,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T13:25:07,249 WARN [DataStreamer for file /user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,42863,1733577894737/c7c455b68129%2C42863%2C1733577894737.1733577894972 block BP-959501690-172.17.0.3-1733577891118:blk_1073741837_1013 {}] hdfs.DataStreamer(1731): Error Recovery for BP-959501690-172.17.0.3-1733577891118:blk_1073741837_1013 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40395,DS-bc541ee7-02ba-4fbd-8aa8-6b25f00a10bd,DISK], DatanodeInfoWithStorage[127.0.0.1:41375,DS-5b80aff9-5093-47e6-b68d-7fda9ccb5d7b,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40395,DS-bc541ee7-02ba-4fbd-8aa8-6b25f00a10bd,DISK]) is bad. 2024-12-07T13:25:07,250 WARN [DataStreamer for file /user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/MasterData/WALs/c7c455b68129,46593,1733577893287/c7c455b68129%2C46593%2C1733577893287.1733577893614 block BP-959501690-172.17.0.3-1733577891118:blk_1073741830_1006 {}] hdfs.DataStreamer(1731): Error Recovery for BP-959501690-172.17.0.3-1733577891118:blk_1073741830_1006 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41375,DS-5b80aff9-5093-47e6-b68d-7fda9ccb5d7b,DISK], DatanodeInfoWithStorage[127.0.0.1:40395,DS-bc541ee7-02ba-4fbd-8aa8-6b25f00a10bd,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:40395,DS-bc541ee7-02ba-4fbd-8aa8-6b25f00a10bd,DISK]) is bad. 2024-12-07T13:25:07,250 WARN [DataStreamer for file /user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,43699,1733577893481/c7c455b68129%2C43699%2C1733577893481.1733577894113 block BP-959501690-172.17.0.3-1733577891118:blk_1073741833_1009 {}] hdfs.DataStreamer(1731): Error Recovery for BP-959501690-172.17.0.3-1733577891118:blk_1073741833_1009 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41375,DS-5b80aff9-5093-47e6-b68d-7fda9ccb5d7b,DISK], DatanodeInfoWithStorage[127.0.0.1:40395,DS-bc541ee7-02ba-4fbd-8aa8-6b25f00a10bd,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:40395,DS-bc541ee7-02ba-4fbd-8aa8-6b25f00a10bd,DISK]) is bad. 
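The "Error Recovery ... datanode N ... is bad" entries above are the HDFS client's write-pipeline recovery: when a datanode in the pipeline stops acking, DataStreamer drops it and carries on with the surviving replicas. Whether the client additionally asks the namenode for a replacement datanode is controlled by the replace-datanode-on-failure client settings; a minimal sketch of relaxing them for a small test cluster (the property keys are the standard HDFS client ones, the chosen values are only illustrative):

    import org.apache.hadoop.conf.Configuration;

    public class PipelineRecoverySettings {
      // Build a client Configuration that keeps writing on the surviving
      // replicas when no replacement datanode can be found (typical for
      // mini clusters with only a handful of datanodes).
      public static Configuration relaxedPipelineRecovery() {
        Configuration conf = new Configuration();
        conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.enable", true);
        conf.set("dfs.client.block.write.replace-datanode-on-failure.policy", "DEFAULT");
        conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.best-effort", true);
        return conf;
      }
    }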
2024-12-07T13:25:07,251 WARN [DataStreamer for file /user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,43699,1733577893481/c7c455b68129%2C43699%2C1733577893481.meta.1733577894498.meta block BP-959501690-172.17.0.3-1733577891118:blk_1073741834_1010 {}] hdfs.DataStreamer(1731): Error Recovery for BP-959501690-172.17.0.3-1733577891118:blk_1073741834_1010 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41375,DS-5b80aff9-5093-47e6-b68d-7fda9ccb5d7b,DISK], DatanodeInfoWithStorage[127.0.0.1:40395,DS-bc541ee7-02ba-4fbd-8aa8-6b25f00a10bd,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:40395,DS-bc541ee7-02ba-4fbd-8aa8-6b25f00a10bd,DISK]) is bad. 2024-12-07T13:25:07,250 WARN [PacketResponder: BP-959501690-172.17.0.3-1733577891118:blk_1073741833_1009, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:40395] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T13:25:07,251 WARN [PacketResponder: BP-959501690-172.17.0.3-1733577891118:blk_1073741834_1010, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:40395] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T13:25:07,250 WARN [PacketResponder: BP-959501690-172.17.0.3-1733577891118:blk_1073741830_1006, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:40395] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T13:25:07,252 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1857253718_22 at /127.0.0.1:35006 [Receiving block BP-959501690-172.17.0.3-1733577891118:blk_1073741837_1013] {}] datanode.DataXceiver(331): 127.0.0.1:40395:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:35006 dst: /127.0.0.1:40395 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T13:25:07,253 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1283404152_22 at /127.0.0.1:43444 [Receiving block BP-959501690-172.17.0.3-1733577891118:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:41375:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:43444 dst: /127.0.0.1:41375 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
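On the writing side, these broken DataXceiver pipelines surface as plain IOExceptions from write()/hflush() on the open output stream, which is exactly what the WAL code reports further below. A minimal, generic sketch of that pattern, assuming an already-configured FileSystem; the path is illustrative only:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class HflushExample {
      public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(new Configuration());
        Path p = new Path("/tmp/pipeline-demo"); // illustrative path
        try (FSDataOutputStream out = fs.create(p, true)) {
          out.writeBytes("one edit\n");
          try {
            out.hflush(); // pushes the packet through the datanode pipeline
          } catch (java.io.IOException e) {
            // A dead pipeline (EOF, connection reset, closed channel on a
            // datanode) is reported here; the caller decides whether to
            // retry, roll to a new file, or abort.
          }
        }
      }
    }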
2024-12-07T13:25:07,253 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1224523256_22 at /127.0.0.1:43418 [Receiving block BP-959501690-172.17.0.3-1733577891118:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:41375:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:43418 dst: /127.0.0.1:41375 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T13:25:07,253 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1283404152_22 at /127.0.0.1:34946 [Receiving block BP-959501690-172.17.0.3-1733577891118:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:40395:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:34946 dst: /127.0.0.1:40395 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T13:25:07,253 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1857253718_22 at /127.0.0.1:43478 [Receiving block BP-959501690-172.17.0.3-1733577891118:blk_1073741837_1013] {}] datanode.DataXceiver(331): 127.0.0.1:41375:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:43478 dst: /127.0.0.1:41375 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T13:25:07,253 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@10c7ae56{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-07T13:25:07,253 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1283404152_22 at /127.0.0.1:43432 [Receiving block BP-959501690-172.17.0.3-1733577891118:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:41375:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:43432 dst: /127.0.0.1:41375 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T13:25:07,254 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@7d85f4da{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-07T13:25:07,254 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1224523256_22 at /127.0.0.1:34928 [Receiving block BP-959501690-172.17.0.3-1733577891118:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:40395:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:34928 dst: /127.0.0.1:40395 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-07T13:25:07,254 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-07T13:25:07,254 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1283404152_22 at /127.0.0.1:34960 [Receiving block BP-959501690-172.17.0.3-1733577891118:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:40395:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:34960 dst: /127.0.0.1:40395 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
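The "Stopped ... {datanode,...}" and "Ending block pool service" entries around here are datanodes being shut down, which is presumably how this testLogRollOnDatanodeDeath run induces the failure. A minimal sketch of stopping one datanode in a test, assuming a MiniDFSCluster handle like the one the HBase testing utility wraps (stopDataNode/restartDataNode are the standard MiniDFSCluster test hooks):

    import org.apache.hadoop.hdfs.MiniDFSCluster;

    public class KillDataNode {
      // Stop the i-th datanode of a running mini cluster; the returned
      // properties can later be handed to restartDataNode(...) to revive it.
      static MiniDFSCluster.DataNodeProperties stopOne(MiniDFSCluster cluster, int i) {
        return cluster.stopDataNode(i);
      }
    }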
2024-12-07T13:25:07,254 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@217a95d6{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-07T13:25:07,255 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5289966b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3a4fca0d-a22f-07a3-d286-965b16cc613d/hadoop.log.dir/,STOPPED} 2024-12-07T13:25:07,256 WARN [BP-959501690-172.17.0.3-1733577891118 heartbeating to localhost/127.0.0.1:35785 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-07T13:25:07,256 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-12-07T13:25:07,256 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-07T13:25:07,256 WARN [BP-959501690-172.17.0.3-1733577891118 heartbeating to localhost/127.0.0.1:35785 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-959501690-172.17.0.3-1733577891118 (Datanode Uuid bafc61d8-ce26-4093-91ed-a19abb4f3d38) service to localhost/127.0.0.1:35785 2024-12-07T13:25:07,257 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3a4fca0d-a22f-07a3-d286-965b16cc613d/cluster_3b40d539-567c-d4e1-b985-613e875fc041/data/data3/current/BP-959501690-172.17.0.3-1733577891118 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-07T13:25:07,257 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3a4fca0d-a22f-07a3-d286-965b16cc613d/cluster_3b40d539-567c-d4e1-b985-613e875fc041/data/data4/current/BP-959501690-172.17.0.3-1733577891118 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-07T13:25:07,257 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-07T13:25:07,258 WARN [DataStreamer for file /user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/MasterData/WALs/c7c455b68129,46593,1733577893287/c7c455b68129%2C46593%2C1733577893287.1733577893614 block BP-959501690-172.17.0.3-1733577891118:blk_1073741830_1006 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741830_1006 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T13:25:07,258 WARN [DataStreamer for file /user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,43699,1733577893481/c7c455b68129%2C43699%2C1733577893481.1733577894113 block BP-959501690-172.17.0.3-1733577891118:blk_1073741833_1009 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741833_1009 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T13:25:07,258 WARN [DataStreamer for file /user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,42863,1733577894737/c7c455b68129%2C42863%2C1733577894737.1733577894972 block BP-959501690-172.17.0.3-1733577891118:blk_1073741837_1013 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741837_1013 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
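A little further down, the region server's log roller reacts to these failures by rolling c7c455b68129%2C43699%2C1733577893481 onto a new WAL file whose pipeline uses only the surviving datanodes. The same roll can also be requested explicitly through the client Admin API; a minimal sketch, assuming an already-open Connection (connection setup omitted):

    import org.apache.hadoop.hbase.ServerName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;

    public class RollWal {
      // Ask one region server to roll its write-ahead log now.
      static void rollFor(Connection conn, ServerName regionServer) throws java.io.IOException {
        try (Admin admin = conn.getAdmin()) {
          admin.rollWALWriter(regionServer);
        }
      }
    }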
2024-12-07T13:25:07,258 WARN [DataStreamer for file /user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,43699,1733577893481/c7c455b68129%2C43699%2C1733577893481.meta.1733577894498.meta block BP-959501690-172.17.0.3-1733577891118:blk_1073741834_1010 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741834_1010 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T13:25:07,265 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@158b5ab3{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-07T13:25:07,265 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1495e1af{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-07T13:25:07,265 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-07T13:25:07,265 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@372d60ec{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-07T13:25:07,265 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@51561b8f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3a4fca0d-a22f-07a3-d286-965b16cc613d/hadoop.log.dir/,STOPPED} 2024-12-07T13:25:07,266 WARN [BP-959501690-172.17.0.3-1733577891118 heartbeating to localhost/127.0.0.1:35785 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-07T13:25:07,266 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
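The RecoverLeaseFSUtils entries that follow show the Close-WAL-Writer worker forcing lease recovery on the abandoned WAL file so it can later be replayed or archived; on HDFS that boils down to a recoverLease call on the DistributedFileSystem. A minimal sketch, assuming an HDFS FileSystem and a Path pointing at the old WAL (the real code retries until this returns true):

    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public class RecoverWalLease {
      // Returns true once the file is closed and its last block finalized;
      // callers typically retry with a backoff while it returns false.
      static boolean recover(FileSystem fs, Path oldWal) throws java.io.IOException {
        return ((DistributedFileSystem) fs).recoverLease(oldWal);
      }
    }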
2024-12-07T13:25:07,266 WARN [BP-959501690-172.17.0.3-1733577891118 heartbeating to localhost/127.0.0.1:35785 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-959501690-172.17.0.3-1733577891118 (Datanode Uuid 7b803b60-17a9-4d27-bf78-3255a6a5ca0c) service to localhost/127.0.0.1:35785 2024-12-07T13:25:07,266 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-07T13:25:07,267 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3a4fca0d-a22f-07a3-d286-965b16cc613d/cluster_3b40d539-567c-d4e1-b985-613e875fc041/data/data1/current/BP-959501690-172.17.0.3-1733577891118 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-07T13:25:07,267 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3a4fca0d-a22f-07a3-d286-965b16cc613d/cluster_3b40d539-567c-d4e1-b985-613e875fc041/data/data2/current/BP-959501690-172.17.0.3-1733577891118 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-07T13:25:07,267 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-07T13:25:07,271 DEBUG [RPCClient-NioEventLoopGroup-4-10 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRollOnDatanodeDeath', row='row0002', locateType=CURRENT is [region=TestLogRolling-testLogRollOnDatanodeDeath,,1733577894867.e6098b8ab71fc33e054cc7633f0ee629., hostname=c7c455b68129,43699,1733577893481, seqNum=2] 2024-12-07T13:25:07,272 ERROR [FSHLog-0-hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084-prefix:c7c455b68129,43699,1733577893481 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41375,DS-5b80aff9-5093-47e6-b68d-7fda9ccb5d7b,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T13:25:07,272 WARN [FSHLog-0-hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084-prefix:c7c455b68129,43699,1733577893481 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41375,DS-5b80aff9-5093-47e6-b68d-7fda9ccb5d7b,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T13:25:07,272 INFO [regionserver/c7c455b68129:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41375,DS-5b80aff9-5093-47e6-b68d-7fda9ccb5d7b,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T13:25:07,272 DEBUG [regionserver/c7c455b68129:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog c7c455b68129%2C43699%2C1733577893481:(num 1733577894113) roll requested 2024-12-07T13:25:07,273 INFO [regionserver/c7c455b68129:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor c7c455b68129%2C43699%2C1733577893481.1733577907272 2024-12-07T13:25:07,279 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:25:07,279 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:25:07,279 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:25:07,279 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:25:07,279 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:25:07,279 INFO [regionserver/c7c455b68129:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,43699,1733577893481/c7c455b68129%2C43699%2C1733577893481.1733577894113 with entries=1, filesize=455 B; new WAL /user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,43699,1733577893481/c7c455b68129%2C43699%2C1733577893481.1733577907272 2024-12-07T13:25:07,280 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41375,DS-5b80aff9-5093-47e6-b68d-7fda9ccb5d7b,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T13:25:07,280 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41375,DS-5b80aff9-5093-47e6-b68d-7fda9ccb5d7b,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T13:25:07,281 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(47): Initialize RecoverLeaseFSUtils 2024-12-07T13:25:07,281 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(59): set recoverLeaseMethod to org.apache.hadoop.fs.LeaseRecoverable.recoverLease() 2024-12-07T13:25:07,281 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,43699,1733577893481/c7c455b68129%2C43699%2C1733577893481.1733577894113 2024-12-07T13:25:07,283 DEBUG [regionserver/c7c455b68129:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45283:45283),(127.0.0.1/127.0.0.1:36089:36089)] 2024-12-07T13:25:07,283 DEBUG [regionserver/c7c455b68129:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,43699,1733577893481/c7c455b68129%2C43699%2C1733577893481.1733577894113 is not closed yet, will try archiving it next time 2024-12-07T13:25:07,283 WARN [IPC Server handler 3 on default port 35785 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,43699,1733577893481/c7c455b68129%2C43699%2C1733577893481.1733577894113 has not been closed. Lease recovery is in progress. RecoveryId = 1019 for block blk_1073741833_1009 2024-12-07T13:25:07,286 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,43699,1733577893481/c7c455b68129%2C43699%2C1733577893481.1733577894113 after 4ms 2024-12-07T13:25:07,592 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41375,DS-5b80aff9-5093-47e6-b68d-7fda9ccb5d7b,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T13:25:08,837 INFO [regionserver/c7c455b68129:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41375,DS-5b80aff9-5093-47e6-b68d-7fda9ccb5d7b,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T13:25:09,283 INFO [regionserver/c7c455b68129:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41375,DS-5b80aff9-5093-47e6-b68d-7fda9ccb5d7b,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T13:25:09,284 INFO [Time-limited test {}] wal.TestLogRolling(261): log.getCurrentFileName(): hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,43699,1733577893481/c7c455b68129%2C43699%2C1733577893481.1733577907272 2024-12-07T13:25:09,285 WARN [ResponseProcessor for block BP-959501690-172.17.0.3-1733577891118:blk_1073741838_1018 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-959501690-172.17.0.3-1733577891118:blk_1073741838_1018 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T13:25:09,285 WARN [DataStreamer for file /user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,43699,1733577893481/c7c455b68129%2C43699%2C1733577893481.1733577907272 block BP-959501690-172.17.0.3-1733577891118:blk_1073741838_1018 {}] hdfs.DataStreamer(1731): Error Recovery for BP-959501690-172.17.0.3-1733577891118:blk_1073741838_1018 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40487,DS-362498b2-c630-41dd-a062-e31650fa79e9,DISK], DatanodeInfoWithStorage[127.0.0.1:41655,DS-ce9ff6f4-6fc6-4a51-89c6-edb968514e60,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40487,DS-362498b2-c630-41dd-a062-e31650fa79e9,DISK]) is bad. 2024-12-07T13:25:09,286 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1283404152_22 at /127.0.0.1:34312 [Receiving block BP-959501690-172.17.0.3-1733577891118:blk_1073741838_1018] {}] datanode.DataXceiver(331): 127.0.0.1:40487:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:34312 dst: /127.0.0.1:40487 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T13:25:09,286 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1283404152_22 at /127.0.0.1:34972 [Receiving block BP-959501690-172.17.0.3-1733577891118:blk_1073741838_1018] {}] datanode.DataXceiver(331): 127.0.0.1:41655:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:34972 dst: /127.0.0.1:41655 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
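The RecoverLeaseFSUtils messages a little earlier show the WAL closer asking the NameNode to recover the lease on the old WAL file and retrying until the file is reported closed ("Failed to recover lease, attempt=0 ... after 4ms"). A minimal sketch of the same idea against the public DistributedFileSystem API, written as an illustration rather than the utility's actual code; the path and timeout are hypothetical:

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public class LeaseRecoverySketch {
      /** Poll recoverLease() until the NameNode reports the file closed or the timeout expires. */
      static boolean recoverLease(DistributedFileSystem dfs, Path path, long timeoutMs)
          throws Exception {
        long deadline = System.currentTimeMillis() + timeoutMs;
        while (System.currentTimeMillis() < deadline) {
          // recoverLease() returns true once the lease is released and the file is closed.
          if (dfs.recoverLease(path) || dfs.isFileClosed(path)) {
            return true;
          }
          Thread.sleep(1_000); // back off before asking the NameNode again
        }
        return false;
      }

      public static void main(String[] args) throws Exception {
        // Hypothetical WAL path; assumes the filesystem at this URI is HDFS.
        try (FileSystem fs = FileSystem.get(new URI("hdfs://localhost:35785"), new Configuration())) {
          DistributedFileSystem dfs = (DistributedFileSystem) fs;
          System.out.println(recoverLease(dfs, new Path("/tmp/old-wal"), 60_000L));
        }
      }
    }

recoverLease() returns false while recovery is still in progress on the NameNode, so callers poll it (or isFileClosed()) until it turns true or they give up, which is why the log shows numbered attempts.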
2024-12-07T13:25:09,331 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@33f262d4{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-07T13:25:09,332 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@44692c68{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-07T13:25:09,332 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-07T13:25:09,333 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@264a677b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-07T13:25:09,333 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5455501c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3a4fca0d-a22f-07a3-d286-965b16cc613d/hadoop.log.dir/,STOPPED} 2024-12-07T13:25:09,336 WARN [BP-959501690-172.17.0.3-1733577891118 heartbeating to localhost/127.0.0.1:35785 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-07T13:25:09,336 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-12-07T13:25:09,336 WARN [BP-959501690-172.17.0.3-1733577891118 heartbeating to localhost/127.0.0.1:35785 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-959501690-172.17.0.3-1733577891118 (Datanode Uuid e7e8cc2e-cd70-40eb-8c2c-1622627d1944) service to localhost/127.0.0.1:35785 2024-12-07T13:25:09,336 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-07T13:25:09,337 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3a4fca0d-a22f-07a3-d286-965b16cc613d/cluster_3b40d539-567c-d4e1-b985-613e875fc041/data/data9/current/BP-959501690-172.17.0.3-1733577891118 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-07T13:25:09,337 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3a4fca0d-a22f-07a3-d286-965b16cc613d/cluster_3b40d539-567c-d4e1-b985-613e875fc041/data/data10/current/BP-959501690-172.17.0.3-1733577891118 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-07T13:25:09,337 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-07T13:25:09,593 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41375,DS-5b80aff9-5093-47e6-b68d-7fda9ccb5d7b,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T13:25:10,838 INFO [regionserver/c7c455b68129:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41375,DS-5b80aff9-5093-47e6-b68d-7fda9ccb5d7b,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T13:25:11,283 INFO [regionserver/c7c455b68129:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41375,DS-5b80aff9-5093-47e6-b68d-7fda9ccb5d7b,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T13:25:11,284 WARN [regionserver/c7c455b68129:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41655,DS-ce9ff6f4-6fc6-4a51-89c6-edb968514e60,DISK]] 2024-12-07T13:25:11,284 DEBUG [regionserver/c7c455b68129:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog c7c455b68129%2C43699%2C1733577893481:(num 1733577907272) roll requested 2024-12-07T13:25:11,285 INFO [regionserver/c7c455b68129:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor c7c455b68129%2C43699%2C1733577893481.1733577911284 2024-12-07T13:25:11,288 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,43699,1733577893481/c7c455b68129%2C43699%2C1733577893481.1733577894113 after 4007ms 2024-12-07T13:25:11,289 WARN [Thread-909 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741839_1021 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T13:25:11,289 WARN [Thread-909 {}] hdfs.DataStreamer(1731): Error Recovery for BP-959501690-172.17.0.3-1733577891118:blk_1073741839_1021 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40395,DS-bc541ee7-02ba-4fbd-8aa8-6b25f00a10bd,DISK], DatanodeInfoWithStorage[127.0.0.1:41655,DS-ce9ff6f4-6fc6-4a51-89c6-edb968514e60,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40395,DS-bc541ee7-02ba-4fbd-8aa8-6b25f00a10bd,DISK]) is bad. 2024-12-07T13:25:11,289 WARN [Thread-909 {}] hdfs.DataStreamer(1850): Abandoning BP-959501690-172.17.0.3-1733577891118:blk_1073741839_1021 2024-12-07T13:25:11,293 WARN [Thread-909 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40395,DS-bc541ee7-02ba-4fbd-8aa8-6b25f00a10bd,DISK] 2024-12-07T13:25:11,300 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:25:11,300 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:25:11,300 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:25:11,300 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:25:11,301 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:25:11,301 INFO [regionserver/c7c455b68129:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,43699,1733577893481/c7c455b68129%2C43699%2C1733577893481.1733577907272 with entries=2, filesize=2.37 KB; new WAL /user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,43699,1733577893481/c7c455b68129%2C43699%2C1733577893481.1733577911284 2024-12-07T13:25:11,301 DEBUG [regionserver/c7c455b68129:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34539:34539),(127.0.0.1/127.0.0.1:36089:36089)] 2024-12-07T13:25:11,302 DEBUG [regionserver/c7c455b68129:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,43699,1733577893481/c7c455b68129%2C43699%2C1733577893481.1733577894113 is not closed yet, will try archiving it next time 2024-12-07T13:25:11,302 DEBUG [regionserver/c7c455b68129:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,43699,1733577893481/c7c455b68129%2C43699%2C1733577893481.1733577907272 is not closed yet, will try archiving it next time 2024-12-07T13:25:11,302 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41655 is added to blk_1073741838_1020 (size=2431) 2024-12-07T13:25:11,343 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-12-07T13:25:11,594 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41375,DS-5b80aff9-5093-47e6-b68d-7fda9ccb5d7b,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T13:25:11,705 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,43699,1733577893481/c7c455b68129%2C43699%2C1733577893481.1733577894113 is not closed yet, will try archiving it next time 2024-12-07T13:25:12,839 INFO [regionserver/c7c455b68129:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41375,DS-5b80aff9-5093-47e6-b68d-7fda9ccb5d7b,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T13:25:13,102 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@3fa49935[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:41655, datanodeUuid=0d78caf8-0f38-428f-9cf0-01e5ac9a9d07, infoPort=36089, infoSecurePort=0, ipcPort=39509, storageInfo=lv=-57;cid=testClusterID;nsid=2132482547;c=1733577891118):Failed to transfer BP-959501690-172.17.0.3-1733577891118:blk_1073741838_1020 to 127.0.0.1:41375 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T13:25:13,302 INFO [regionserver/c7c455b68129:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41375,DS-5b80aff9-5093-47e6-b68d-7fda9ccb5d7b,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T13:25:13,347 WARN [ResponseProcessor for block BP-959501690-172.17.0.3-1733577891118:blk_1073741840_1022 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-959501690-172.17.0.3-1733577891118:blk_1073741840_1022 java.io.IOException: Bad response ERROR for BP-959501690-172.17.0.3-1733577891118:blk_1073741840_1022 from datanode DatanodeInfoWithStorage[127.0.0.1:41655,DS-ce9ff6f4-6fc6-4a51-89c6-edb968514e60,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T13:25:13,348 WARN [DataStreamer for file /user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,43699,1733577893481/c7c455b68129%2C43699%2C1733577893481.1733577911284 block BP-959501690-172.17.0.3-1733577891118:blk_1073741840_1022 {}] hdfs.DataStreamer(1731): Error Recovery for BP-959501690-172.17.0.3-1733577891118:blk_1073741840_1022 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43971,DS-317dae2a-1dd6-4115-b7da-799a141a3dbf,DISK], DatanodeInfoWithStorage[127.0.0.1:41655,DS-ce9ff6f4-6fc6-4a51-89c6-edb968514e60,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:41655,DS-ce9ff6f4-6fc6-4a51-89c6-edb968514e60,DISK]) is bad. 2024-12-07T13:25:13,348 WARN [PacketResponder: BP-959501690-172.17.0.3-1733577891118:blk_1073741840_1022, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:41655] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
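In the records above, the log roller decides on its own to roll once it sees the pipeline shrink ("Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL"), and "Rolled WAL ... with entries=2" confirms the switch to a new file. A roll can also be requested from a client; a minimal sketch with the public Admin API, where the connection setup is hypothetical and the server name is the region server from this log:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.ServerName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class RollWalSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
          // Server name format is host,port,startcode; this one is the region server in the log.
          ServerName server = ServerName.valueOf("c7c455b68129,43699,1733577893481");
          // Asks that region server to close its current WAL file and start a new one.
          admin.rollWALWriter(server);
        }
      }
    }
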
2024-12-07T13:25:13,350 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1283404152_22 at /127.0.0.1:59966 [Receiving block BP-959501690-172.17.0.3-1733577891118:blk_1073741840_1022] {}] datanode.DataXceiver(331): 127.0.0.1:43971:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:59966 dst: /127.0.0.1:43971 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T13:25:13,350 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1283404152_22 at /127.0.0.1:50002 [Receiving block BP-959501690-172.17.0.3-1733577891118:blk_1073741840_1022] {}] datanode.DataXceiver(331): 127.0.0.1:41655:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:50002 dst: /127.0.0.1:41655 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T13:25:13,382 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@49584ab0{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-07T13:25:13,382 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@59d0863f{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-07T13:25:13,382 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-07T13:25:13,382 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3eb2cdd3{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-07T13:25:13,382 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7ed35b1e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3a4fca0d-a22f-07a3-d286-965b16cc613d/hadoop.log.dir/,STOPPED} 2024-12-07T13:25:13,384 WARN [BP-959501690-172.17.0.3-1733577891118 heartbeating to localhost/127.0.0.1:35785 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-07T13:25:13,384 WARN [BP-959501690-172.17.0.3-1733577891118 heartbeating to localhost/127.0.0.1:35785 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-959501690-172.17.0.3-1733577891118 (Datanode Uuid 0d78caf8-0f38-428f-9cf0-01e5ac9a9d07) service to localhost/127.0.0.1:35785 2024-12-07T13:25:13,384 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
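The repeated blocks of "Stopped o.e.j.w.WebAppContext@...{datanode,...}" and "Ending block pool service" above are the test tearing down datanodes one at a time, which is what keeps invalidating the write pipelines. A minimal sketch of how a test can do this with MiniDFSCluster from the hadoop-hdfs tests jar (the same artifact named in the stopped web-context paths); the cluster size and datanode index are hypothetical:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.MiniDFSCluster;

    public class StopDatanodeSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Start a small in-process HDFS cluster (hypothetical size).
        MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
        try {
          cluster.waitActive();
          // Stopping a datanode tears down its BPServiceActor and web context,
          // producing shutdown messages like the ones in this log, and leaves
          // any open write pipelines to recover with the remaining datanodes.
          cluster.stopDataNode(0);
        } finally {
          cluster.shutdown();
        }
      }
    }
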
2024-12-07T13:25:13,384 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-07T13:25:13,384 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3a4fca0d-a22f-07a3-d286-965b16cc613d/cluster_3b40d539-567c-d4e1-b985-613e875fc041/data/data7/current/BP-959501690-172.17.0.3-1733577891118 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-07T13:25:13,385 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3a4fca0d-a22f-07a3-d286-965b16cc613d/cluster_3b40d539-567c-d4e1-b985-613e875fc041/data/data8/current/BP-959501690-172.17.0.3-1733577891118 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-07T13:25:13,385 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-07T13:25:13,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43699 {}] regionserver.HRegion(8855): Flush requested on e6098b8ab71fc33e054cc7633f0ee629 2024-12-07T13:25:13,395 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing e6098b8ab71fc33e054cc7633f0ee629 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-07T13:25:13,414 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e6098b8ab71fc33e054cc7633f0ee629/.tmp/info/216c4fe911b3477683ae13e40245e3e4 is 1080, key is row0002/info:/1733577909339/Put/seqid=0 2024-12-07T13:25:13,416 WARN [Thread-919 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741841_1024 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:40395 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-07T13:25:13,416 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1283404152_22 at /127.0.0.1:59986 [Receiving block BP-959501690-172.17.0.3-1733577891118:blk_1073741841_1024] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3a4fca0d-a22f-07a3-d286-965b16cc613d/cluster_3b40d539-567c-d4e1-b985-613e875fc041/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3a4fca0d-a22f-07a3-d286-965b16cc613d/cluster_3b40d539-567c-d4e1-b985-613e875fc041/data/data6]'}, localName='127.0.0.1:43971', datanodeUuid='75be824e-cc7b-46b3-a1bc-5cae5177bdc9', xmitsInProgress=0}:Exception transferring block BP-959501690-172.17.0.3-1733577891118:blk_1073741841_1024 to mirror 127.0.0.1:40395 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T13:25:13,416 WARN [Thread-919 {}] hdfs.DataStreamer(1731): Error Recovery for BP-959501690-172.17.0.3-1733577891118:blk_1073741841_1024 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43971,DS-317dae2a-1dd6-4115-b7da-799a141a3dbf,DISK], DatanodeInfoWithStorage[127.0.0.1:40395,DS-bc541ee7-02ba-4fbd-8aa8-6b25f00a10bd,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:40395,DS-bc541ee7-02ba-4fbd-8aa8-6b25f00a10bd,DISK]) is bad. 2024-12-07T13:25:13,417 WARN [Thread-919 {}] hdfs.DataStreamer(1850): Abandoning BP-959501690-172.17.0.3-1733577891118:blk_1073741841_1024 2024-12-07T13:25:13,417 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1283404152_22 at /127.0.0.1:59986 [Receiving block BP-959501690-172.17.0.3-1733577891118:blk_1073741841_1024] {}] datanode.BlockReceiver(316): Block 1073741841 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-12-07T13:25:13,417 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1283404152_22 at /127.0.0.1:59986 [Receiving block BP-959501690-172.17.0.3-1733577891118:blk_1073741841_1024] {}] datanode.DataXceiver(331): 127.0.0.1:43971:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:59986 dst: /127.0.0.1:43971 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T13:25:13,417 WARN [Thread-919 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40395,DS-bc541ee7-02ba-4fbd-8aa8-6b25f00a10bd,DISK] 2024-12-07T13:25:13,418 WARN [Thread-919 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741842_1025 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T13:25:13,419 WARN [Thread-919 {}] hdfs.DataStreamer(1731): Error Recovery for BP-959501690-172.17.0.3-1733577891118:blk_1073741842_1025 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41375,DS-5b80aff9-5093-47e6-b68d-7fda9ccb5d7b,DISK], DatanodeInfoWithStorage[127.0.0.1:43971,DS-317dae2a-1dd6-4115-b7da-799a141a3dbf,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41375,DS-5b80aff9-5093-47e6-b68d-7fda9ccb5d7b,DISK]) is bad. 2024-12-07T13:25:13,419 WARN [Thread-919 {}] hdfs.DataStreamer(1850): Abandoning BP-959501690-172.17.0.3-1733577891118:blk_1073741842_1025 2024-12-07T13:25:13,419 WARN [Thread-919 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41375,DS-5b80aff9-5093-47e6-b68d-7fda9ccb5d7b,DISK] 2024-12-07T13:25:13,420 WARN [Thread-919 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741843_1026 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T13:25:13,420 WARN [Thread-919 {}] hdfs.DataStreamer(1731): Error Recovery for BP-959501690-172.17.0.3-1733577891118:blk_1073741843_1026 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41655,DS-ce9ff6f4-6fc6-4a51-89c6-edb968514e60,DISK], DatanodeInfoWithStorage[127.0.0.1:43971,DS-317dae2a-1dd6-4115-b7da-799a141a3dbf,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41655,DS-ce9ff6f4-6fc6-4a51-89c6-edb968514e60,DISK]) is bad. 2024-12-07T13:25:13,420 WARN [Thread-919 {}] hdfs.DataStreamer(1850): Abandoning BP-959501690-172.17.0.3-1733577891118:blk_1073741843_1026 2024-12-07T13:25:13,421 WARN [Thread-919 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41655,DS-ce9ff6f4-6fc6-4a51-89c6-edb968514e60,DISK] 2024-12-07T13:25:13,423 WARN [Thread-919 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741844_1027 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:40487 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T13:25:13,423 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1283404152_22 at /127.0.0.1:60002 [Receiving block BP-959501690-172.17.0.3-1733577891118:blk_1073741844_1027] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3a4fca0d-a22f-07a3-d286-965b16cc613d/cluster_3b40d539-567c-d4e1-b985-613e875fc041/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3a4fca0d-a22f-07a3-d286-965b16cc613d/cluster_3b40d539-567c-d4e1-b985-613e875fc041/data/data6]'}, localName='127.0.0.1:43971', datanodeUuid='75be824e-cc7b-46b3-a1bc-5cae5177bdc9', xmitsInProgress=0}:Exception transferring block BP-959501690-172.17.0.3-1733577891118:blk_1073741844_1027 to mirror 127.0.0.1:40487 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T13:25:13,423 WARN [Thread-919 {}] hdfs.DataStreamer(1731): Error Recovery for BP-959501690-172.17.0.3-1733577891118:blk_1073741844_1027 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43971,DS-317dae2a-1dd6-4115-b7da-799a141a3dbf,DISK], DatanodeInfoWithStorage[127.0.0.1:40487,DS-362498b2-c630-41dd-a062-e31650fa79e9,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:40487,DS-362498b2-c630-41dd-a062-e31650fa79e9,DISK]) is bad. 2024-12-07T13:25:13,423 WARN [Thread-919 {}] hdfs.DataStreamer(1850): Abandoning BP-959501690-172.17.0.3-1733577891118:blk_1073741844_1027 2024-12-07T13:25:13,423 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1283404152_22 at /127.0.0.1:60002 [Receiving block BP-959501690-172.17.0.3-1733577891118:blk_1073741844_1027] {}] datanode.BlockReceiver(316): Block 1073741844 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-12-07T13:25:13,423 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1283404152_22 at /127.0.0.1:60002 [Receiving block BP-959501690-172.17.0.3-1733577891118:blk_1073741844_1027] {}] datanode.DataXceiver(331): 127.0.0.1:43971:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60002 dst: /127.0.0.1:43971 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-07T13:25:13,423 WARN [Thread-919 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40487,DS-362498b2-c630-41dd-a062-e31650fa79e9,DISK] 2024-12-07T13:25:13,424 WARN [IPC Server handler 4 on default port 35785 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-07T13:25:13,424 WARN [IPC Server handler 4 on default port 35785 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-07T13:25:13,424 WARN [IPC Server handler 4 on default port 35785 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-07T13:25:13,427 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43971 is added to blk_1073741845_1028 (size=10347) 2024-12-07T13:25:13,594 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41375,DS-5b80aff9-5093-47e6-b68d-7fda9ccb5d7b,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
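Each failed pipeline rebuild above ends either with the client declaring the surviving datanodes bad or with the NameNode unable to place enough replicas ("still in need of 1 to reach 2"). The HDFS client exposes configuration keys that govern how it replaces failed datanodes in an open pipeline; a small sketch of those knobs, shown only as an illustration and not as something this test sets:

    import org.apache.hadoop.conf.Configuration;

    public class PipelineRecoverySettingsSketch {
      public static Configuration relaxedPipelineRecovery() {
        Configuration conf = new Configuration();
        // Try to replace failed datanodes in an open write pipeline...
        conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.enable", true);
        conf.set("dfs.client.block.write.replace-datanode-on-failure.policy", "DEFAULT");
        // ...but if no replacement can be found, continue with the surviving
        // datanodes instead of failing the stream outright.
        conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.best-effort", true);
        return conf;
      }
    }

With best-effort enabled, a client that cannot find a replacement keeps writing to whatever healthy datanodes remain rather than failing the stream.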
2024-12-07T13:25:13,829 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e6098b8ab71fc33e054cc7633f0ee629/.tmp/info/216c4fe911b3477683ae13e40245e3e4 2024-12-07T13:25:13,842 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e6098b8ab71fc33e054cc7633f0ee629/.tmp/info/216c4fe911b3477683ae13e40245e3e4 as hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e6098b8ab71fc33e054cc7633f0ee629/info/216c4fe911b3477683ae13e40245e3e4 2024-12-07T13:25:13,850 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e6098b8ab71fc33e054cc7633f0ee629/info/216c4fe911b3477683ae13e40245e3e4, entries=5, sequenceid=11, filesize=10.1 K 2024-12-07T13:25:13,852 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=9.45 KB/9681 for e6098b8ab71fc33e054cc7633f0ee629 in 457ms, sequenceid=11, compaction requested=false 2024-12-07T13:25:13,852 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for e6098b8ab71fc33e054cc7633f0ee629: 2024-12-07T13:25:14,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43699 {}] regionserver.HRegion(8855): Flush requested on e6098b8ab71fc33e054cc7633f0ee629 2024-12-07T13:25:14,028 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing e6098b8ab71fc33e054cc7633f0ee629 1/1 column families, dataSize=10.50 KB heapSize=11.50 KB 2024-12-07T13:25:14,037 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e6098b8ab71fc33e054cc7633f0ee629/.tmp/info/b816850bb0ac49f7899c1430c40859eb is 1080, key is row0007/info:/1733577913396/Put/seqid=0 2024-12-07T13:25:14,040 WARN [Thread-926 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741846_1029 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
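The MemStoreFlusher lines just above show region e6098b8ab71fc33e054cc7633f0ee629 flushing ~7.36 KB of edits to a new HFile at sequenceid=11 even while the WAL pipeline is degraded. A flush can also be requested explicitly from a client; a minimal sketch with the public Admin API, reusing the table name from this test but with the connection setup hypothetical:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTableSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
          // Ask every region of the table to write its in-memory edits out as
          // HFiles, the operation the MemStoreFlusher is performing in this log.
          admin.flush(TableName.valueOf("TestLogRolling-testLogRollOnDatanodeDeath"));
        }
      }
    }
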
2024-12-07T13:25:14,041 WARN [Thread-926 {}] hdfs.DataStreamer(1731): Error Recovery for BP-959501690-172.17.0.3-1733577891118:blk_1073741846_1029 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41375,DS-5b80aff9-5093-47e6-b68d-7fda9ccb5d7b,DISK], DatanodeInfoWithStorage[127.0.0.1:40395,DS-bc541ee7-02ba-4fbd-8aa8-6b25f00a10bd,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41375,DS-5b80aff9-5093-47e6-b68d-7fda9ccb5d7b,DISK]) is bad. 2024-12-07T13:25:14,041 WARN [Thread-926 {}] hdfs.DataStreamer(1850): Abandoning BP-959501690-172.17.0.3-1733577891118:blk_1073741846_1029 2024-12-07T13:25:14,042 WARN [Thread-926 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41375,DS-5b80aff9-5093-47e6-b68d-7fda9ccb5d7b,DISK] 2024-12-07T13:25:14,043 WARN [Thread-926 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741847_1030 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T13:25:14,043 WARN [Thread-926 {}] hdfs.DataStreamer(1731): Error Recovery for BP-959501690-172.17.0.3-1733577891118:blk_1073741847_1030 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40487,DS-362498b2-c630-41dd-a062-e31650fa79e9,DISK], DatanodeInfoWithStorage[127.0.0.1:41655,DS-ce9ff6f4-6fc6-4a51-89c6-edb968514e60,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40487,DS-362498b2-c630-41dd-a062-e31650fa79e9,DISK]) is bad. 2024-12-07T13:25:14,043 WARN [Thread-926 {}] hdfs.DataStreamer(1850): Abandoning BP-959501690-172.17.0.3-1733577891118:blk_1073741847_1030 2024-12-07T13:25:14,044 WARN [Thread-926 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40487,DS-362498b2-c630-41dd-a062-e31650fa79e9,DISK] 2024-12-07T13:25:14,045 WARN [Thread-926 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741848_1031 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T13:25:14,045 WARN [Thread-926 {}] hdfs.DataStreamer(1731): Error Recovery for BP-959501690-172.17.0.3-1733577891118:blk_1073741848_1031 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41655,DS-ce9ff6f4-6fc6-4a51-89c6-edb968514e60,DISK], DatanodeInfoWithStorage[127.0.0.1:40395,DS-bc541ee7-02ba-4fbd-8aa8-6b25f00a10bd,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41655,DS-ce9ff6f4-6fc6-4a51-89c6-edb968514e60,DISK]) is bad. 2024-12-07T13:25:14,045 WARN [Thread-926 {}] hdfs.DataStreamer(1850): Abandoning BP-959501690-172.17.0.3-1733577891118:blk_1073741848_1031 2024-12-07T13:25:14,046 WARN [Thread-926 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41655,DS-ce9ff6f4-6fc6-4a51-89c6-edb968514e60,DISK] 2024-12-07T13:25:14,048 WARN [Thread-926 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741849_1032 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:40395 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T13:25:14,048 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1283404152_22 at /127.0.0.1:60046 [Receiving block BP-959501690-172.17.0.3-1733577891118:blk_1073741849_1032] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3a4fca0d-a22f-07a3-d286-965b16cc613d/cluster_3b40d539-567c-d4e1-b985-613e875fc041/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3a4fca0d-a22f-07a3-d286-965b16cc613d/cluster_3b40d539-567c-d4e1-b985-613e875fc041/data/data6]'}, localName='127.0.0.1:43971', datanodeUuid='75be824e-cc7b-46b3-a1bc-5cae5177bdc9', xmitsInProgress=0}:Exception transferring block BP-959501690-172.17.0.3-1733577891118:blk_1073741849_1032 to mirror 127.0.0.1:40395 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T13:25:14,049 WARN [Thread-926 {}] hdfs.DataStreamer(1731): Error Recovery for BP-959501690-172.17.0.3-1733577891118:blk_1073741849_1032 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43971,DS-317dae2a-1dd6-4115-b7da-799a141a3dbf,DISK], DatanodeInfoWithStorage[127.0.0.1:40395,DS-bc541ee7-02ba-4fbd-8aa8-6b25f00a10bd,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:40395,DS-bc541ee7-02ba-4fbd-8aa8-6b25f00a10bd,DISK]) is bad. 2024-12-07T13:25:14,049 WARN [Thread-926 {}] hdfs.DataStreamer(1850): Abandoning BP-959501690-172.17.0.3-1733577891118:blk_1073741849_1032 2024-12-07T13:25:14,049 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1283404152_22 at /127.0.0.1:60046 [Receiving block BP-959501690-172.17.0.3-1733577891118:blk_1073741849_1032] {}] datanode.BlockReceiver(316): Block 1073741849 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-12-07T13:25:14,049 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1283404152_22 at /127.0.0.1:60046 [Receiving block BP-959501690-172.17.0.3-1733577891118:blk_1073741849_1032] {}] datanode.DataXceiver(331): 127.0.0.1:43971:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60046 dst: /127.0.0.1:43971 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-07T13:25:14,049 WARN [Thread-926 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40395,DS-bc541ee7-02ba-4fbd-8aa8-6b25f00a10bd,DISK] 2024-12-07T13:25:14,050 WARN [IPC Server handler 0 on default port 35785 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-07T13:25:14,050 WARN [IPC Server handler 0 on default port 35785 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-07T13:25:14,050 WARN [IPC Server handler 0 on default port 35785 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-07T13:25:14,054 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43971 is added to blk_1073741850_1033 (size=12506) 2024-12-07T13:25:14,454 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=10.50 KB at sequenceid=24 (bloomFilter=true), to=hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e6098b8ab71fc33e054cc7633f0ee629/.tmp/info/b816850bb0ac49f7899c1430c40859eb 2024-12-07T13:25:14,461 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e6098b8ab71fc33e054cc7633f0ee629/.tmp/info/b816850bb0ac49f7899c1430c40859eb as hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e6098b8ab71fc33e054cc7633f0ee629/info/b816850bb0ac49f7899c1430c40859eb 2024-12-07T13:25:14,467 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e6098b8ab71fc33e054cc7633f0ee629/info/b816850bb0ac49f7899c1430c40859eb, entries=7, sequenceid=24, filesize=12.2 K 2024-12-07T13:25:14,468 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~10.50 KB/10756, heapSize ~11.48 KB/11760, currentSize=2.10 KB/2150 for e6098b8ab71fc33e054cc7633f0ee629 in 441ms, sequenceid=24, compaction requested=false 2024-12-07T13:25:14,469 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for e6098b8ab71fc33e054cc7633f0ee629: 2024-12-07T13:25:14,469 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should 
split because region size is big enough sumSize=22.3 K, sizeToCheck=16.0 K 2024-12-07T13:25:14,469 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-07T13:25:14,469 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e6098b8ab71fc33e054cc7633f0ee629/info/b816850bb0ac49f7899c1430c40859eb because midkey is the same as first or last row 2024-12-07T13:25:14,839 INFO [regionserver/c7c455b68129:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41375,DS-5b80aff9-5093-47e6-b68d-7fda9ccb5d7b,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T13:25:15,302 INFO [regionserver/c7c455b68129:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41375,DS-5b80aff9-5093-47e6-b68d-7fda9ccb5d7b,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T13:25:15,303 WARN [regionserver/c7c455b68129:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:43971,DS-317dae2a-1dd6-4115-b7da-799a141a3dbf,DISK]] 2024-12-07T13:25:15,303 DEBUG [regionserver/c7c455b68129:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog c7c455b68129%2C43699%2C1733577893481:(num 1733577911284) roll requested 2024-12-07T13:25:15,303 INFO [regionserver/c7c455b68129:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor c7c455b68129%2C43699%2C1733577893481.1733577915303 2024-12-07T13:25:15,306 WARN [Thread-931 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741851_1034 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T13:25:15,307 WARN [Thread-931 {}] hdfs.DataStreamer(1731): Error Recovery for BP-959501690-172.17.0.3-1733577891118:blk_1073741851_1034 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41655,DS-ce9ff6f4-6fc6-4a51-89c6-edb968514e60,DISK], DatanodeInfoWithStorage[127.0.0.1:41375,DS-5b80aff9-5093-47e6-b68d-7fda9ccb5d7b,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41655,DS-ce9ff6f4-6fc6-4a51-89c6-edb968514e60,DISK]) is bad. 2024-12-07T13:25:15,307 WARN [Thread-931 {}] hdfs.DataStreamer(1850): Abandoning BP-959501690-172.17.0.3-1733577891118:blk_1073741851_1034 2024-12-07T13:25:15,307 WARN [Thread-931 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41655,DS-ce9ff6f4-6fc6-4a51-89c6-edb968514e60,DISK] 2024-12-07T13:25:15,311 WARN [Thread-931 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741852_1035 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:41375 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T13:25:15,311 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1283404152_22 at /127.0.0.1:60058 [Receiving block BP-959501690-172.17.0.3-1733577891118:blk_1073741852_1035] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3a4fca0d-a22f-07a3-d286-965b16cc613d/cluster_3b40d539-567c-d4e1-b985-613e875fc041/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3a4fca0d-a22f-07a3-d286-965b16cc613d/cluster_3b40d539-567c-d4e1-b985-613e875fc041/data/data6]'}, localName='127.0.0.1:43971', datanodeUuid='75be824e-cc7b-46b3-a1bc-5cae5177bdc9', xmitsInProgress=0}:Exception transferring block BP-959501690-172.17.0.3-1733577891118:blk_1073741852_1035 to mirror 127.0.0.1:41375 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T13:25:15,311 WARN [Thread-931 {}] hdfs.DataStreamer(1731): Error Recovery for BP-959501690-172.17.0.3-1733577891118:blk_1073741852_1035 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43971,DS-317dae2a-1dd6-4115-b7da-799a141a3dbf,DISK], DatanodeInfoWithStorage[127.0.0.1:41375,DS-5b80aff9-5093-47e6-b68d-7fda9ccb5d7b,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:41375,DS-5b80aff9-5093-47e6-b68d-7fda9ccb5d7b,DISK]) is bad. 2024-12-07T13:25:15,311 WARN [Thread-931 {}] hdfs.DataStreamer(1850): Abandoning BP-959501690-172.17.0.3-1733577891118:blk_1073741852_1035 2024-12-07T13:25:15,311 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1283404152_22 at /127.0.0.1:60058 [Receiving block BP-959501690-172.17.0.3-1733577891118:blk_1073741852_1035] {}] datanode.BlockReceiver(316): Block 1073741852 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-12-07T13:25:15,311 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1283404152_22 at /127.0.0.1:60058 [Receiving block BP-959501690-172.17.0.3-1733577891118:blk_1073741852_1035] {}] datanode.DataXceiver(331): 127.0.0.1:43971:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60058 dst: /127.0.0.1:43971 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T13:25:15,312 WARN [Thread-931 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41375,DS-5b80aff9-5093-47e6-b68d-7fda9ccb5d7b,DISK] 2024-12-07T13:25:15,314 WARN [Thread-931 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741853_1036 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T13:25:15,314 WARN [Thread-931 {}] hdfs.DataStreamer(1731): Error Recovery for BP-959501690-172.17.0.3-1733577891118:blk_1073741853_1036 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40395,DS-bc541ee7-02ba-4fbd-8aa8-6b25f00a10bd,DISK], DatanodeInfoWithStorage[127.0.0.1:40487,DS-362498b2-c630-41dd-a062-e31650fa79e9,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40395,DS-bc541ee7-02ba-4fbd-8aa8-6b25f00a10bd,DISK]) is bad. 2024-12-07T13:25:15,314 WARN [Thread-931 {}] hdfs.DataStreamer(1850): Abandoning BP-959501690-172.17.0.3-1733577891118:blk_1073741853_1036 2024-12-07T13:25:15,315 WARN [Thread-931 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40395,DS-bc541ee7-02ba-4fbd-8aa8-6b25f00a10bd,DISK] 2024-12-07T13:25:15,317 WARN [Thread-931 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741854_1037 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:40487 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T13:25:15,317 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1283404152_22 at /127.0.0.1:60062 [Receiving block BP-959501690-172.17.0.3-1733577891118:blk_1073741854_1037] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3a4fca0d-a22f-07a3-d286-965b16cc613d/cluster_3b40d539-567c-d4e1-b985-613e875fc041/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3a4fca0d-a22f-07a3-d286-965b16cc613d/cluster_3b40d539-567c-d4e1-b985-613e875fc041/data/data6]'}, localName='127.0.0.1:43971', datanodeUuid='75be824e-cc7b-46b3-a1bc-5cae5177bdc9', xmitsInProgress=0}:Exception transferring block BP-959501690-172.17.0.3-1733577891118:blk_1073741854_1037 to mirror 127.0.0.1:40487 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T13:25:15,317 WARN [Thread-931 {}] hdfs.DataStreamer(1731): Error Recovery for BP-959501690-172.17.0.3-1733577891118:blk_1073741854_1037 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43971,DS-317dae2a-1dd6-4115-b7da-799a141a3dbf,DISK], DatanodeInfoWithStorage[127.0.0.1:40487,DS-362498b2-c630-41dd-a062-e31650fa79e9,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:40487,DS-362498b2-c630-41dd-a062-e31650fa79e9,DISK]) is bad. 2024-12-07T13:25:15,317 WARN [Thread-931 {}] hdfs.DataStreamer(1850): Abandoning BP-959501690-172.17.0.3-1733577891118:blk_1073741854_1037 2024-12-07T13:25:15,317 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1283404152_22 at /127.0.0.1:60062 [Receiving block BP-959501690-172.17.0.3-1733577891118:blk_1073741854_1037] {}] datanode.BlockReceiver(316): Block 1073741854 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-12-07T13:25:15,317 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1283404152_22 at /127.0.0.1:60062 [Receiving block BP-959501690-172.17.0.3-1733577891118:blk_1073741854_1037] {}] datanode.DataXceiver(331): 127.0.0.1:43971:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60062 dst: /127.0.0.1:43971 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-07T13:25:15,318 WARN [Thread-931 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40487,DS-362498b2-c630-41dd-a062-e31650fa79e9,DISK] 2024-12-07T13:25:15,319 WARN [IPC Server handler 1 on default port 35785 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-07T13:25:15,319 WARN [IPC Server handler 1 on default port 35785 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-07T13:25:15,319 WARN [IPC Server handler 1 on default port 35785 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-07T13:25:15,321 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:25:15,322 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:25:15,322 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:25:15,322 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:25:15,322 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:25:15,322 INFO [regionserver/c7c455b68129:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,43699,1733577893481/c7c455b68129%2C43699%2C1733577893481.1733577911284 with entries=25, filesize=25.38 KB; new WAL /user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,43699,1733577893481/c7c455b68129%2C43699%2C1733577893481.1733577915303 2024-12-07T13:25:15,323 DEBUG [regionserver/c7c455b68129:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34539:34539)] 2024-12-07T13:25:15,323 DEBUG [regionserver/c7c455b68129:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,43699,1733577893481/c7c455b68129%2C43699%2C1733577893481.1733577894113 is not closed yet, will try archiving it next time 2024-12-07T13:25:15,323 DEBUG [regionserver/c7c455b68129:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,43699,1733577893481/c7c455b68129%2C43699%2C1733577893481.1733577911284 is not closed yet, will try archiving it next time 2024-12-07T13:25:15,323 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,43699,1733577893481/c7c455b68129%2C43699%2C1733577893481.1733577907272 to 
hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/oldWALs/c7c455b68129%2C43699%2C1733577893481.1733577907272 2024-12-07T13:25:15,324 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43971 is added to blk_1073741840_1023 (size=25992) 2024-12-07T13:25:15,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43699 {}] regionserver.HRegion(8855): Flush requested on e6098b8ab71fc33e054cc7633f0ee629 2024-12-07T13:25:15,467 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing e6098b8ab71fc33e054cc7633f0ee629 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-12-07T13:25:15,476 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e6098b8ab71fc33e054cc7633f0ee629/.tmp/info/bf3faed5cadf42c6ade322c948a75d13 is 1079, key is tmprow/info:/1733577915464/Put/seqid=0 2024-12-07T13:25:15,479 WARN [Thread-937 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741856_1039 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T13:25:15,479 WARN [Thread-937 {}] hdfs.DataStreamer(1731): Error Recovery for BP-959501690-172.17.0.3-1733577891118:blk_1073741856_1039 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40395,DS-bc541ee7-02ba-4fbd-8aa8-6b25f00a10bd,DISK], DatanodeInfoWithStorage[127.0.0.1:40487,DS-362498b2-c630-41dd-a062-e31650fa79e9,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40395,DS-bc541ee7-02ba-4fbd-8aa8-6b25f00a10bd,DISK]) is bad. 2024-12-07T13:25:15,479 WARN [Thread-937 {}] hdfs.DataStreamer(1850): Abandoning BP-959501690-172.17.0.3-1733577891118:blk_1073741856_1039 2024-12-07T13:25:15,480 WARN [Thread-937 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40395,DS-bc541ee7-02ba-4fbd-8aa8-6b25f00a10bd,DISK] 2024-12-07T13:25:15,481 WARN [Thread-937 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741857_1040 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T13:25:15,481 WARN [Thread-937 {}] hdfs.DataStreamer(1731): Error Recovery for BP-959501690-172.17.0.3-1733577891118:blk_1073741857_1040 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40487,DS-362498b2-c630-41dd-a062-e31650fa79e9,DISK], DatanodeInfoWithStorage[127.0.0.1:41375,DS-5b80aff9-5093-47e6-b68d-7fda9ccb5d7b,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40487,DS-362498b2-c630-41dd-a062-e31650fa79e9,DISK]) is bad. 2024-12-07T13:25:15,481 WARN [Thread-937 {}] hdfs.DataStreamer(1850): Abandoning BP-959501690-172.17.0.3-1733577891118:blk_1073741857_1040 2024-12-07T13:25:15,482 WARN [Thread-937 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40487,DS-362498b2-c630-41dd-a062-e31650fa79e9,DISK] 2024-12-07T13:25:15,483 WARN [Thread-937 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741858_1041 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T13:25:15,483 WARN [Thread-937 {}] hdfs.DataStreamer(1731): Error Recovery for BP-959501690-172.17.0.3-1733577891118:blk_1073741858_1041 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41375,DS-5b80aff9-5093-47e6-b68d-7fda9ccb5d7b,DISK], DatanodeInfoWithStorage[127.0.0.1:43971,DS-317dae2a-1dd6-4115-b7da-799a141a3dbf,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41375,DS-5b80aff9-5093-47e6-b68d-7fda9ccb5d7b,DISK]) is bad. 2024-12-07T13:25:15,483 WARN [Thread-937 {}] hdfs.DataStreamer(1850): Abandoning BP-959501690-172.17.0.3-1733577891118:blk_1073741858_1041 2024-12-07T13:25:15,483 WARN [Thread-937 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41375,DS-5b80aff9-5093-47e6-b68d-7fda9ccb5d7b,DISK] 2024-12-07T13:25:15,485 WARN [Thread-937 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741859_1042 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T13:25:15,485 WARN [Thread-937 {}] hdfs.DataStreamer(1731): Error Recovery for BP-959501690-172.17.0.3-1733577891118:blk_1073741859_1042 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41655,DS-ce9ff6f4-6fc6-4a51-89c6-edb968514e60,DISK], DatanodeInfoWithStorage[127.0.0.1:43971,DS-317dae2a-1dd6-4115-b7da-799a141a3dbf,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41655,DS-ce9ff6f4-6fc6-4a51-89c6-edb968514e60,DISK]) is bad. 2024-12-07T13:25:15,485 WARN [Thread-937 {}] hdfs.DataStreamer(1850): Abandoning BP-959501690-172.17.0.3-1733577891118:blk_1073741859_1042 2024-12-07T13:25:15,485 WARN [Thread-937 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41655,DS-ce9ff6f4-6fc6-4a51-89c6-edb968514e60,DISK] 2024-12-07T13:25:15,486 WARN [IPC Server handler 0 on default port 35785 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-07T13:25:15,486 WARN [IPC Server handler 0 on default port 35785 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-07T13:25:15,486 WARN [IPC Server handler 0 on default port 35785 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-07T13:25:15,489 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43971 is added to blk_1073741860_1043 (size=6027) 2024-12-07T13:25:15,595 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41375,DS-5b80aff9-5093-47e6-b68d-7fda9ccb5d7b,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T13:25:15,726 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,43699,1733577893481/c7c455b68129%2C43699%2C1733577893481.1733577894113 is not closed yet, will try archiving it next time 2024-12-07T13:25:15,890 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=34 (bloomFilter=true), to=hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e6098b8ab71fc33e054cc7633f0ee629/.tmp/info/bf3faed5cadf42c6ade322c948a75d13 2024-12-07T13:25:15,901 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e6098b8ab71fc33e054cc7633f0ee629/.tmp/info/bf3faed5cadf42c6ade322c948a75d13 as hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e6098b8ab71fc33e054cc7633f0ee629/info/bf3faed5cadf42c6ade322c948a75d13 2024-12-07T13:25:15,909 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e6098b8ab71fc33e054cc7633f0ee629/info/bf3faed5cadf42c6ade322c948a75d13, entries=1, sequenceid=34, filesize=5.9 K 2024-12-07T13:25:15,910 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=2.10 KB/2150 for e6098b8ab71fc33e054cc7633f0ee629 in 443ms, sequenceid=34, compaction requested=true 2024-12-07T13:25:15,910 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for e6098b8ab71fc33e054cc7633f0ee629: 2024-12-07T13:25:15,910 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=28.2 K, sizeToCheck=16.0 K 2024-12-07T13:25:15,910 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-07T13:25:15,910 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e6098b8ab71fc33e054cc7633f0ee629/info/b816850bb0ac49f7899c1430c40859eb because midkey is the same as first or last row 2024-12-07T13:25:15,911 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e6098b8ab71fc33e054cc7633f0ee629:info, priority=-2147483648, current under compaction store size is 1 2024-12-07T13:25:15,911 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T13:25:15,911 DEBUG [RS:0;c7c455b68129:43699-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-07T13:25:15,912 DEBUG [RS:0;c7c455b68129:43699-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 28880 
starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-07T13:25:15,912 DEBUG [RS:0;c7c455b68129:43699-shortCompactions-0 {}] regionserver.HStore(1541): e6098b8ab71fc33e054cc7633f0ee629/info is initiating minor compaction (all files) 2024-12-07T13:25:15,912 INFO [RS:0;c7c455b68129:43699-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of e6098b8ab71fc33e054cc7633f0ee629/info in TestLogRolling-testLogRollOnDatanodeDeath,,1733577894867.e6098b8ab71fc33e054cc7633f0ee629. 2024-12-07T13:25:15,912 INFO [RS:0;c7c455b68129:43699-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e6098b8ab71fc33e054cc7633f0ee629/info/216c4fe911b3477683ae13e40245e3e4, hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e6098b8ab71fc33e054cc7633f0ee629/info/b816850bb0ac49f7899c1430c40859eb, hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e6098b8ab71fc33e054cc7633f0ee629/info/bf3faed5cadf42c6ade322c948a75d13] into tmpdir=hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e6098b8ab71fc33e054cc7633f0ee629/.tmp, totalSize=28.2 K 2024-12-07T13:25:15,913 DEBUG [RS:0;c7c455b68129:43699-shortCompactions-0 {}] compactions.Compactor(225): Compacting 216c4fe911b3477683ae13e40245e3e4, keycount=5, bloomtype=ROW, size=10.1 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1733577909339 2024-12-07T13:25:15,913 DEBUG [RS:0;c7c455b68129:43699-shortCompactions-0 {}] compactions.Compactor(225): Compacting b816850bb0ac49f7899c1430c40859eb, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=24, earliestPutTs=1733577913396 2024-12-07T13:25:15,914 DEBUG [RS:0;c7c455b68129:43699-shortCompactions-0 {}] compactions.Compactor(225): Compacting bf3faed5cadf42c6ade322c948a75d13, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=34, earliestPutTs=1733577915464 2024-12-07T13:25:15,926 INFO [RS:0;c7c455b68129:43699-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): e6098b8ab71fc33e054cc7633f0ee629#info#compaction#21 average throughput is 6.16 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-07T13:25:15,927 DEBUG [RS:0;c7c455b68129:43699-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e6098b8ab71fc33e054cc7633f0ee629/.tmp/info/108ffd11cb734e7bb248b46beaec63b5 is 1080, key is row0002/info:/1733577909339/Put/seqid=0 2024-12-07T13:25:15,929 WARN [Thread-943 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741861_1044 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T13:25:15,929 WARN [Thread-943 {}] hdfs.DataStreamer(1731): Error Recovery for BP-959501690-172.17.0.3-1733577891118:blk_1073741861_1044 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40395,DS-bc541ee7-02ba-4fbd-8aa8-6b25f00a10bd,DISK], DatanodeInfoWithStorage[127.0.0.1:40487,DS-362498b2-c630-41dd-a062-e31650fa79e9,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40395,DS-bc541ee7-02ba-4fbd-8aa8-6b25f00a10bd,DISK]) is bad. 2024-12-07T13:25:15,929 WARN [Thread-943 {}] hdfs.DataStreamer(1850): Abandoning BP-959501690-172.17.0.3-1733577891118:blk_1073741861_1044 2024-12-07T13:25:15,930 WARN [Thread-943 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40395,DS-bc541ee7-02ba-4fbd-8aa8-6b25f00a10bd,DISK] 2024-12-07T13:25:15,931 WARN [Thread-943 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741862_1045 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T13:25:15,931 WARN [Thread-943 {}] hdfs.DataStreamer(1731): Error Recovery for BP-959501690-172.17.0.3-1733577891118:blk_1073741862_1045 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41375,DS-5b80aff9-5093-47e6-b68d-7fda9ccb5d7b,DISK], DatanodeInfoWithStorage[127.0.0.1:41655,DS-ce9ff6f4-6fc6-4a51-89c6-edb968514e60,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41375,DS-5b80aff9-5093-47e6-b68d-7fda9ccb5d7b,DISK]) is bad. 2024-12-07T13:25:15,931 WARN [Thread-943 {}] hdfs.DataStreamer(1850): Abandoning BP-959501690-172.17.0.3-1733577891118:blk_1073741862_1045 2024-12-07T13:25:15,932 WARN [Thread-943 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41375,DS-5b80aff9-5093-47e6-b68d-7fda9ccb5d7b,DISK] 2024-12-07T13:25:15,933 WARN [Thread-943 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741863_1046 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T13:25:15,933 WARN [Thread-943 {}] hdfs.DataStreamer(1731): Error Recovery for BP-959501690-172.17.0.3-1733577891118:blk_1073741863_1046 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41655,DS-ce9ff6f4-6fc6-4a51-89c6-edb968514e60,DISK], DatanodeInfoWithStorage[127.0.0.1:40487,DS-362498b2-c630-41dd-a062-e31650fa79e9,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41655,DS-ce9ff6f4-6fc6-4a51-89c6-edb968514e60,DISK]) is bad. 2024-12-07T13:25:15,933 WARN [Thread-943 {}] hdfs.DataStreamer(1850): Abandoning BP-959501690-172.17.0.3-1733577891118:blk_1073741863_1046 2024-12-07T13:25:15,933 WARN [Thread-943 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41655,DS-ce9ff6f4-6fc6-4a51-89c6-edb968514e60,DISK] 2024-12-07T13:25:15,936 WARN [Thread-943 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741864_1047 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:40487 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T13:25:15,935 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1283404152_22 at /127.0.0.1:60104 [Receiving block BP-959501690-172.17.0.3-1733577891118:blk_1073741864_1047] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3a4fca0d-a22f-07a3-d286-965b16cc613d/cluster_3b40d539-567c-d4e1-b985-613e875fc041/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3a4fca0d-a22f-07a3-d286-965b16cc613d/cluster_3b40d539-567c-d4e1-b985-613e875fc041/data/data6]'}, localName='127.0.0.1:43971', datanodeUuid='75be824e-cc7b-46b3-a1bc-5cae5177bdc9', xmitsInProgress=0}:Exception transferring block BP-959501690-172.17.0.3-1733577891118:blk_1073741864_1047 to mirror 127.0.0.1:40487 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T13:25:15,936 WARN [Thread-943 {}] hdfs.DataStreamer(1731): Error Recovery for BP-959501690-172.17.0.3-1733577891118:blk_1073741864_1047 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43971,DS-317dae2a-1dd6-4115-b7da-799a141a3dbf,DISK], DatanodeInfoWithStorage[127.0.0.1:40487,DS-362498b2-c630-41dd-a062-e31650fa79e9,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:40487,DS-362498b2-c630-41dd-a062-e31650fa79e9,DISK]) is bad. 2024-12-07T13:25:15,936 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1283404152_22 at /127.0.0.1:60104 [Receiving block BP-959501690-172.17.0.3-1733577891118:blk_1073741864_1047] {}] datanode.BlockReceiver(316): Block 1073741864 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-12-07T13:25:15,936 WARN [Thread-943 {}] hdfs.DataStreamer(1850): Abandoning BP-959501690-172.17.0.3-1733577891118:blk_1073741864_1047 2024-12-07T13:25:15,936 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1283404152_22 at /127.0.0.1:60104 [Receiving block BP-959501690-172.17.0.3-1733577891118:blk_1073741864_1047] {}] datanode.DataXceiver(331): 127.0.0.1:43971:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60104 dst: /127.0.0.1:43971 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-07T13:25:15,936 WARN [Thread-943 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40487,DS-362498b2-c630-41dd-a062-e31650fa79e9,DISK] 2024-12-07T13:25:15,937 WARN [IPC Server handler 3 on default port 35785 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-07T13:25:15,937 WARN [IPC Server handler 3 on default port 35785 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-07T13:25:15,937 WARN [IPC Server handler 3 on default port 35785 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-07T13:25:15,940 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43971 is added to blk_1073741865_1048 (size=17994) 2024-12-07T13:25:15,984 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@2c7d538b[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:43971, datanodeUuid=75be824e-cc7b-46b3-a1bc-5cae5177bdc9, infoPort=34539, infoSecurePort=0, ipcPort=43667, storageInfo=lv=-57;cid=testClusterID;nsid=2132482547;c=1733577891118):Failed to transfer BP-959501690-172.17.0.3-1733577891118:blk_1073741845_1028 to 127.0.0.1:41655 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-07T13:25:15,984 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@22bab874[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:43971, datanodeUuid=75be824e-cc7b-46b3-a1bc-5cae5177bdc9, infoPort=34539, infoSecurePort=0, ipcPort=43667, storageInfo=lv=-57;cid=testClusterID;nsid=2132482547;c=1733577891118):Failed to transfer BP-959501690-172.17.0.3-1733577891118:blk_1073741850_1033 to 127.0.0.1:41655 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T13:25:16,357 DEBUG [RS:0;c7c455b68129:43699-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e6098b8ab71fc33e054cc7633f0ee629/.tmp/info/108ffd11cb734e7bb248b46beaec63b5 as hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e6098b8ab71fc33e054cc7633f0ee629/info/108ffd11cb734e7bb248b46beaec63b5 2024-12-07T13:25:16,367 INFO [RS:0;c7c455b68129:43699-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in e6098b8ab71fc33e054cc7633f0ee629/info of e6098b8ab71fc33e054cc7633f0ee629 into 108ffd11cb734e7bb248b46beaec63b5(size=17.6 K), total size for store is 17.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-07T13:25:16,367 DEBUG [RS:0;c7c455b68129:43699-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for e6098b8ab71fc33e054cc7633f0ee629: 2024-12-07T13:25:16,367 INFO [RS:0;c7c455b68129:43699-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRollOnDatanodeDeath,,1733577894867.e6098b8ab71fc33e054cc7633f0ee629., storeName=e6098b8ab71fc33e054cc7633f0ee629/info, priority=13, startTime=1733577915910; duration=0sec 2024-12-07T13:25:16,367 DEBUG [RS:0;c7c455b68129:43699-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-12-07T13:25:16,367 DEBUG [RS:0;c7c455b68129:43699-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-07T13:25:16,368 DEBUG [RS:0;c7c455b68129:43699-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e6098b8ab71fc33e054cc7633f0ee629/info/108ffd11cb734e7bb248b46beaec63b5 because midkey is the same as first or last row 2024-12-07T13:25:16,368 DEBUG [RS:0;c7c455b68129:43699-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-12-07T13:25:16,368 DEBUG [RS:0;c7c455b68129:43699-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-07T13:25:16,368 DEBUG [RS:0;c7c455b68129:43699-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e6098b8ab71fc33e054cc7633f0ee629/info/108ffd11cb734e7bb248b46beaec63b5 because midkey is the same as first or last row 2024-12-07T13:25:16,368 DEBUG [RS:0;c7c455b68129:43699-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-12-07T13:25:16,368 DEBUG [RS:0;c7c455b68129:43699-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-07T13:25:16,368 DEBUG [RS:0;c7c455b68129:43699-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e6098b8ab71fc33e054cc7633f0ee629/info/108ffd11cb734e7bb248b46beaec63b5 because midkey is the same as first or last row 2024-12-07T13:25:16,368 DEBUG [RS:0;c7c455b68129:43699-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T13:25:16,368 DEBUG [RS:0;c7c455b68129:43699-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e6098b8ab71fc33e054cc7633f0ee629:info 2024-12-07T13:25:16,840 INFO [regionserver/c7c455b68129:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41375,DS-5b80aff9-5093-47e6-b68d-7fda9ccb5d7b,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T13:25:16,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43699 {}] regionserver.HRegion(8855): Flush requested on e6098b8ab71fc33e054cc7633f0ee629 2024-12-07T13:25:16,902 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing e6098b8ab71fc33e054cc7633f0ee629 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-12-07T13:25:16,911 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e6098b8ab71fc33e054cc7633f0ee629/.tmp/info/c4af9a650b1645038290fcd41826d118 is 1079, key is tmprow/info:/1733577916899/Put/seqid=0 2024-12-07T13:25:16,914 WARN [Thread-952 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741866_1049 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T13:25:16,915 WARN [Thread-952 {}] hdfs.DataStreamer(1731): Error Recovery for BP-959501690-172.17.0.3-1733577891118:blk_1073741866_1049 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40395,DS-bc541ee7-02ba-4fbd-8aa8-6b25f00a10bd,DISK], DatanodeInfoWithStorage[127.0.0.1:40487,DS-362498b2-c630-41dd-a062-e31650fa79e9,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40395,DS-bc541ee7-02ba-4fbd-8aa8-6b25f00a10bd,DISK]) is bad. 2024-12-07T13:25:16,915 WARN [Thread-952 {}] hdfs.DataStreamer(1850): Abandoning BP-959501690-172.17.0.3-1733577891118:blk_1073741866_1049 2024-12-07T13:25:16,916 WARN [Thread-952 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40395,DS-bc541ee7-02ba-4fbd-8aa8-6b25f00a10bd,DISK] 2024-12-07T13:25:16,917 WARN [Thread-952 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741867_1050 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T13:25:16,917 WARN [Thread-952 {}] hdfs.DataStreamer(1731): Error Recovery for BP-959501690-172.17.0.3-1733577891118:blk_1073741867_1050 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41375,DS-5b80aff9-5093-47e6-b68d-7fda9ccb5d7b,DISK], DatanodeInfoWithStorage[127.0.0.1:40487,DS-362498b2-c630-41dd-a062-e31650fa79e9,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41375,DS-5b80aff9-5093-47e6-b68d-7fda9ccb5d7b,DISK]) is bad. 2024-12-07T13:25:16,917 WARN [Thread-952 {}] hdfs.DataStreamer(1850): Abandoning BP-959501690-172.17.0.3-1733577891118:blk_1073741867_1050 2024-12-07T13:25:16,918 WARN [Thread-952 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41375,DS-5b80aff9-5093-47e6-b68d-7fda9ccb5d7b,DISK] 2024-12-07T13:25:16,920 WARN [Thread-952 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741868_1051 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:40487 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T13:25:16,920 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1283404152_22 at /127.0.0.1:60112 [Receiving block BP-959501690-172.17.0.3-1733577891118:blk_1073741868_1051] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3a4fca0d-a22f-07a3-d286-965b16cc613d/cluster_3b40d539-567c-d4e1-b985-613e875fc041/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3a4fca0d-a22f-07a3-d286-965b16cc613d/cluster_3b40d539-567c-d4e1-b985-613e875fc041/data/data6]'}, localName='127.0.0.1:43971', datanodeUuid='75be824e-cc7b-46b3-a1bc-5cae5177bdc9', xmitsInProgress=0}:Exception transferring block BP-959501690-172.17.0.3-1733577891118:blk_1073741868_1051 to mirror 127.0.0.1:40487 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T13:25:16,920 WARN [Thread-952 {}] hdfs.DataStreamer(1731): Error Recovery for BP-959501690-172.17.0.3-1733577891118:blk_1073741868_1051 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43971,DS-317dae2a-1dd6-4115-b7da-799a141a3dbf,DISK], DatanodeInfoWithStorage[127.0.0.1:40487,DS-362498b2-c630-41dd-a062-e31650fa79e9,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:40487,DS-362498b2-c630-41dd-a062-e31650fa79e9,DISK]) is bad. 2024-12-07T13:25:16,920 WARN [Thread-952 {}] hdfs.DataStreamer(1850): Abandoning BP-959501690-172.17.0.3-1733577891118:blk_1073741868_1051 2024-12-07T13:25:16,920 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1283404152_22 at /127.0.0.1:60112 [Receiving block BP-959501690-172.17.0.3-1733577891118:blk_1073741868_1051] {}] datanode.BlockReceiver(316): Block 1073741868 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-12-07T13:25:16,921 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1283404152_22 at /127.0.0.1:60112 [Receiving block BP-959501690-172.17.0.3-1733577891118:blk_1073741868_1051] {}] datanode.DataXceiver(331): 127.0.0.1:43971:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60112 dst: /127.0.0.1:43971 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T13:25:16,921 WARN [Thread-952 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40487,DS-362498b2-c630-41dd-a062-e31650fa79e9,DISK] 2024-12-07T13:25:16,923 WARN [Thread-952 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741869_1052 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:41655 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T13:25:16,923 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1283404152_22 at /127.0.0.1:60128 [Receiving block BP-959501690-172.17.0.3-1733577891118:blk_1073741869_1052] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3a4fca0d-a22f-07a3-d286-965b16cc613d/cluster_3b40d539-567c-d4e1-b985-613e875fc041/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3a4fca0d-a22f-07a3-d286-965b16cc613d/cluster_3b40d539-567c-d4e1-b985-613e875fc041/data/data6]'}, localName='127.0.0.1:43971', datanodeUuid='75be824e-cc7b-46b3-a1bc-5cae5177bdc9', xmitsInProgress=0}:Exception transferring block BP-959501690-172.17.0.3-1733577891118:blk_1073741869_1052 to mirror 127.0.0.1:41655 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T13:25:16,924 WARN [Thread-952 {}] hdfs.DataStreamer(1731): Error Recovery for BP-959501690-172.17.0.3-1733577891118:blk_1073741869_1052 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43971,DS-317dae2a-1dd6-4115-b7da-799a141a3dbf,DISK], DatanodeInfoWithStorage[127.0.0.1:41655,DS-ce9ff6f4-6fc6-4a51-89c6-edb968514e60,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:41655,DS-ce9ff6f4-6fc6-4a51-89c6-edb968514e60,DISK]) is bad. 2024-12-07T13:25:16,924 WARN [Thread-952 {}] hdfs.DataStreamer(1850): Abandoning BP-959501690-172.17.0.3-1733577891118:blk_1073741869_1052 2024-12-07T13:25:16,924 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1283404152_22 at /127.0.0.1:60128 [Receiving block BP-959501690-172.17.0.3-1733577891118:blk_1073741869_1052] {}] datanode.BlockReceiver(316): Block 1073741869 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-12-07T13:25:16,924 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1283404152_22 at /127.0.0.1:60128 [Receiving block BP-959501690-172.17.0.3-1733577891118:blk_1073741869_1052] {}] datanode.DataXceiver(331): 127.0.0.1:43971:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60128 dst: /127.0.0.1:43971 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] 
at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T13:25:16,924 WARN [Thread-952 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41655,DS-ce9ff6f4-6fc6-4a51-89c6-edb968514e60,DISK] 2024-12-07T13:25:16,925 WARN [IPC Server handler 2 on default port 35785 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-07T13:25:16,925 WARN [IPC Server handler 2 on default port 35785 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-07T13:25:16,925 WARN [IPC Server handler 2 on default port 35785 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-07T13:25:16,928 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43971 is added to blk_1073741870_1053 (size=6027) 2024-12-07T13:25:16,985 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@22bab874[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:43971, datanodeUuid=75be824e-cc7b-46b3-a1bc-5cae5177bdc9, infoPort=34539, infoSecurePort=0, ipcPort=43667, storageInfo=lv=-57;cid=testClusterID;nsid=2132482547;c=1733577891118):Failed to transfer BP-959501690-172.17.0.3-1733577891118:blk_1073741840_1023 to 127.0.0.1:41375 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T13:25:16,985 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@2c7d538b[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:43971, datanodeUuid=75be824e-cc7b-46b3-a1bc-5cae5177bdc9, infoPort=34539, infoSecurePort=0, ipcPort=43667, storageInfo=lv=-57;cid=testClusterID;nsid=2132482547;c=1733577891118):Failed to transfer BP-959501690-172.17.0.3-1733577891118:blk_1073741860_1043 to 127.0.0.1:41655 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T13:25:17,323 INFO [regionserver/c7c455b68129:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41375,DS-5b80aff9-5093-47e6-b68d-7fda9ccb5d7b,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T13:25:17,324 WARN [regionserver/c7c455b68129:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. 
current pipeline: [DatanodeInfoWithStorage[127.0.0.1:43971,DS-317dae2a-1dd6-4115-b7da-799a141a3dbf,DISK]] 2024-12-07T13:25:17,324 DEBUG [regionserver/c7c455b68129:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog c7c455b68129%2C43699%2C1733577893481:(num 1733577915303) roll requested 2024-12-07T13:25:17,324 INFO [regionserver/c7c455b68129:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor c7c455b68129%2C43699%2C1733577893481.1733577917324 2024-12-07T13:25:17,327 WARN [Thread-958 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741871_1054 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T13:25:17,327 WARN [Thread-958 {}] hdfs.DataStreamer(1731): Error Recovery for BP-959501690-172.17.0.3-1733577891118:blk_1073741871_1054 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41655,DS-ce9ff6f4-6fc6-4a51-89c6-edb968514e60,DISK], DatanodeInfoWithStorage[127.0.0.1:40395,DS-bc541ee7-02ba-4fbd-8aa8-6b25f00a10bd,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41655,DS-ce9ff6f4-6fc6-4a51-89c6-edb968514e60,DISK]) is bad. 2024-12-07T13:25:17,327 WARN [Thread-958 {}] hdfs.DataStreamer(1850): Abandoning BP-959501690-172.17.0.3-1733577891118:blk_1073741871_1054 2024-12-07T13:25:17,328 WARN [Thread-958 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41655,DS-ce9ff6f4-6fc6-4a51-89c6-edb968514e60,DISK] 2024-12-07T13:25:17,329 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=45 (bloomFilter=true), to=hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e6098b8ab71fc33e054cc7633f0ee629/.tmp/info/c4af9a650b1645038290fcd41826d118 2024-12-07T13:25:17,330 WARN [Thread-958 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741872_1055 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T13:25:17,330 WARN [Thread-958 {}] hdfs.DataStreamer(1731): Error Recovery for BP-959501690-172.17.0.3-1733577891118:blk_1073741872_1055 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40395,DS-bc541ee7-02ba-4fbd-8aa8-6b25f00a10bd,DISK], DatanodeInfoWithStorage[127.0.0.1:43971,DS-317dae2a-1dd6-4115-b7da-799a141a3dbf,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40395,DS-bc541ee7-02ba-4fbd-8aa8-6b25f00a10bd,DISK]) is bad. 2024-12-07T13:25:17,330 WARN [Thread-958 {}] hdfs.DataStreamer(1850): Abandoning BP-959501690-172.17.0.3-1733577891118:blk_1073741872_1055 2024-12-07T13:25:17,330 WARN [Thread-958 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40395,DS-bc541ee7-02ba-4fbd-8aa8-6b25f00a10bd,DISK] 2024-12-07T13:25:17,334 WARN [Thread-958 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741873_1056 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:40487 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T13:25:17,333 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1283404152_22 at /127.0.0.1:60144 [Receiving block BP-959501690-172.17.0.3-1733577891118:blk_1073741873_1056] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3a4fca0d-a22f-07a3-d286-965b16cc613d/cluster_3b40d539-567c-d4e1-b985-613e875fc041/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3a4fca0d-a22f-07a3-d286-965b16cc613d/cluster_3b40d539-567c-d4e1-b985-613e875fc041/data/data6]'}, localName='127.0.0.1:43971', datanodeUuid='75be824e-cc7b-46b3-a1bc-5cae5177bdc9', xmitsInProgress=0}:Exception transferring block BP-959501690-172.17.0.3-1733577891118:blk_1073741873_1056 to mirror 127.0.0.1:40487 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T13:25:17,334 WARN [Thread-958 {}] hdfs.DataStreamer(1731): Error Recovery for BP-959501690-172.17.0.3-1733577891118:blk_1073741873_1056 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43971,DS-317dae2a-1dd6-4115-b7da-799a141a3dbf,DISK], DatanodeInfoWithStorage[127.0.0.1:40487,DS-362498b2-c630-41dd-a062-e31650fa79e9,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:40487,DS-362498b2-c630-41dd-a062-e31650fa79e9,DISK]) is bad. 2024-12-07T13:25:17,334 WARN [Thread-958 {}] hdfs.DataStreamer(1850): Abandoning BP-959501690-172.17.0.3-1733577891118:blk_1073741873_1056 2024-12-07T13:25:17,334 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1283404152_22 at /127.0.0.1:60144 [Receiving block BP-959501690-172.17.0.3-1733577891118:blk_1073741873_1056] {}] datanode.BlockReceiver(316): Block 1073741873 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-12-07T13:25:17,334 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1283404152_22 at /127.0.0.1:60144 [Receiving block BP-959501690-172.17.0.3-1733577891118:blk_1073741873_1056] {}] datanode.DataXceiver(331): 127.0.0.1:43971:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60144 dst: /127.0.0.1:43971 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T13:25:17,335 WARN [Thread-958 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40487,DS-362498b2-c630-41dd-a062-e31650fa79e9,DISK] 2024-12-07T13:25:17,338 WARN [Thread-958 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741874_1057 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:41375 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-07T13:25:17,337 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1283404152_22 at /127.0.0.1:60160 [Receiving block BP-959501690-172.17.0.3-1733577891118:blk_1073741874_1057] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3a4fca0d-a22f-07a3-d286-965b16cc613d/cluster_3b40d539-567c-d4e1-b985-613e875fc041/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3a4fca0d-a22f-07a3-d286-965b16cc613d/cluster_3b40d539-567c-d4e1-b985-613e875fc041/data/data6]'}, localName='127.0.0.1:43971', datanodeUuid='75be824e-cc7b-46b3-a1bc-5cae5177bdc9', xmitsInProgress=0}:Exception transferring block BP-959501690-172.17.0.3-1733577891118:blk_1073741874_1057 to mirror 127.0.0.1:41375 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T13:25:17,338 WARN [Thread-958 {}] hdfs.DataStreamer(1731): Error Recovery for BP-959501690-172.17.0.3-1733577891118:blk_1073741874_1057 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43971,DS-317dae2a-1dd6-4115-b7da-799a141a3dbf,DISK], DatanodeInfoWithStorage[127.0.0.1:41375,DS-5b80aff9-5093-47e6-b68d-7fda9ccb5d7b,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:41375,DS-5b80aff9-5093-47e6-b68d-7fda9ccb5d7b,DISK]) is bad. 2024-12-07T13:25:17,338 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1283404152_22 at /127.0.0.1:60160 [Receiving block BP-959501690-172.17.0.3-1733577891118:blk_1073741874_1057] {}] datanode.BlockReceiver(316): Block 1073741874 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-12-07T13:25:17,338 WARN [Thread-958 {}] hdfs.DataStreamer(1850): Abandoning BP-959501690-172.17.0.3-1733577891118:blk_1073741874_1057 2024-12-07T13:25:17,338 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1283404152_22 at /127.0.0.1:60160 [Receiving block BP-959501690-172.17.0.3-1733577891118:blk_1073741874_1057] {}] datanode.DataXceiver(331): 127.0.0.1:43971:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60160 dst: /127.0.0.1:43971 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T13:25:17,338 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e6098b8ab71fc33e054cc7633f0ee629/.tmp/info/c4af9a650b1645038290fcd41826d118 as hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e6098b8ab71fc33e054cc7633f0ee629/info/c4af9a650b1645038290fcd41826d118 2024-12-07T13:25:17,338 WARN [Thread-958 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41375,DS-5b80aff9-5093-47e6-b68d-7fda9ccb5d7b,DISK] 2024-12-07T13:25:17,339 WARN [IPC Server handler 2 on default port 35785 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-07T13:25:17,339 WARN [IPC Server handler 2 on default port 35785 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-07T13:25:17,339 WARN [IPC Server handler 2 on default port 35785 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-07T13:25:17,342 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:25:17,342 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:25:17,342 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:25:17,342 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:25:17,342 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:25:17,342 INFO [regionserver/c7c455b68129:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,43699,1733577893481/c7c455b68129%2C43699%2C1733577893481.1733577915303 with entries=14, filesize=12.92 KB; new WAL 
/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,43699,1733577893481/c7c455b68129%2C43699%2C1733577893481.1733577917324 2024-12-07T13:25:17,343 DEBUG [regionserver/c7c455b68129:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34539:34539)] 2024-12-07T13:25:17,343 DEBUG [regionserver/c7c455b68129:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,43699,1733577893481/c7c455b68129%2C43699%2C1733577893481.1733577894113 is not closed yet, will try archiving it next time 2024-12-07T13:25:17,343 DEBUG [regionserver/c7c455b68129:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,43699,1733577893481/c7c455b68129%2C43699%2C1733577893481.1733577915303 is not closed yet, will try archiving it next time 2024-12-07T13:25:17,343 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,43699,1733577893481/c7c455b68129%2C43699%2C1733577893481.1733577911284 to hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/oldWALs/c7c455b68129%2C43699%2C1733577893481.1733577911284 2024-12-07T13:25:17,344 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43971 is added to blk_1073741855_1038 (size=13234) 2024-12-07T13:25:17,344 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,43699,1733577893481/c7c455b68129%2C43699%2C1733577893481.1733577894113 is not closed yet, will try archiving it next time 2024-12-07T13:25:17,345 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e6098b8ab71fc33e054cc7633f0ee629/info/c4af9a650b1645038290fcd41826d118, entries=1, sequenceid=45, filesize=5.9 K 2024-12-07T13:25:17,346 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=2.10 KB/2150 for e6098b8ab71fc33e054cc7633f0ee629 in 444ms, sequenceid=45, compaction requested=false 2024-12-07T13:25:17,346 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for e6098b8ab71fc33e054cc7633f0ee629: 2024-12-07T13:25:17,346 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=23.5 K, sizeToCheck=16.0 K 2024-12-07T13:25:17,347 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-07T13:25:17,347 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e6098b8ab71fc33e054cc7633f0ee629/info/108ffd11cb734e7bb248b46beaec63b5 because midkey is the same as first or last row 2024-12-07T13:25:17,596 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41375,DS-5b80aff9-5093-47e6-b68d-7fda9ccb5d7b,DISK]] are bad. Aborting... 
at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T13:25:18,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43699 {}] regionserver.HRegion(8855): Flush requested on e6098b8ab71fc33e054cc7633f0ee629 2024-12-07T13:25:18,347 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing e6098b8ab71fc33e054cc7633f0ee629 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-12-07T13:25:18,356 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e6098b8ab71fc33e054cc7633f0ee629/.tmp/info/57e8ee5e0d194df3bbb99467ee4bc499 is 1079, key is tmprow/info:/1733577918344/Put/seqid=0 2024-12-07T13:25:18,359 WARN [Thread-964 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741876_1059 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T13:25:18,360 WARN [Thread-964 {}] hdfs.DataStreamer(1731): Error Recovery for BP-959501690-172.17.0.3-1733577891118:blk_1073741876_1059 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40395,DS-bc541ee7-02ba-4fbd-8aa8-6b25f00a10bd,DISK], DatanodeInfoWithStorage[127.0.0.1:41655,DS-ce9ff6f4-6fc6-4a51-89c6-edb968514e60,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40395,DS-bc541ee7-02ba-4fbd-8aa8-6b25f00a10bd,DISK]) is bad. 2024-12-07T13:25:18,360 WARN [Thread-964 {}] hdfs.DataStreamer(1850): Abandoning BP-959501690-172.17.0.3-1733577891118:blk_1073741876_1059 2024-12-07T13:25:18,360 WARN [Thread-964 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40395,DS-bc541ee7-02ba-4fbd-8aa8-6b25f00a10bd,DISK] 2024-12-07T13:25:18,363 WARN [Thread-964 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741877_1060 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:41655 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T13:25:18,363 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1283404152_22 at /127.0.0.1:60174 [Receiving block BP-959501690-172.17.0.3-1733577891118:blk_1073741877_1060] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3a4fca0d-a22f-07a3-d286-965b16cc613d/cluster_3b40d539-567c-d4e1-b985-613e875fc041/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3a4fca0d-a22f-07a3-d286-965b16cc613d/cluster_3b40d539-567c-d4e1-b985-613e875fc041/data/data6]'}, localName='127.0.0.1:43971', datanodeUuid='75be824e-cc7b-46b3-a1bc-5cae5177bdc9', xmitsInProgress=0}:Exception transferring block BP-959501690-172.17.0.3-1733577891118:blk_1073741877_1060 to mirror 127.0.0.1:41655 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T13:25:18,363 WARN [Thread-964 {}] hdfs.DataStreamer(1731): Error Recovery for BP-959501690-172.17.0.3-1733577891118:blk_1073741877_1060 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43971,DS-317dae2a-1dd6-4115-b7da-799a141a3dbf,DISK], DatanodeInfoWithStorage[127.0.0.1:41655,DS-ce9ff6f4-6fc6-4a51-89c6-edb968514e60,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:41655,DS-ce9ff6f4-6fc6-4a51-89c6-edb968514e60,DISK]) is bad. 2024-12-07T13:25:18,363 WARN [Thread-964 {}] hdfs.DataStreamer(1850): Abandoning BP-959501690-172.17.0.3-1733577891118:blk_1073741877_1060 2024-12-07T13:25:18,363 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1283404152_22 at /127.0.0.1:60174 [Receiving block BP-959501690-172.17.0.3-1733577891118:blk_1073741877_1060] {}] datanode.BlockReceiver(316): Block 1073741877 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 
2024-12-07T13:25:18,363 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1283404152_22 at /127.0.0.1:60174 [Receiving block BP-959501690-172.17.0.3-1733577891118:blk_1073741877_1060] {}] datanode.DataXceiver(331): 127.0.0.1:43971:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60174 dst: /127.0.0.1:43971 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T13:25:18,364 WARN [Thread-964 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41655,DS-ce9ff6f4-6fc6-4a51-89c6-edb968514e60,DISK] 2024-12-07T13:25:18,365 WARN [Thread-964 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741878_1061 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T13:25:18,365 WARN [Thread-964 {}] hdfs.DataStreamer(1731): Error Recovery for BP-959501690-172.17.0.3-1733577891118:blk_1073741878_1061 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41375,DS-5b80aff9-5093-47e6-b68d-7fda9ccb5d7b,DISK], DatanodeInfoWithStorage[127.0.0.1:43971,DS-317dae2a-1dd6-4115-b7da-799a141a3dbf,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41375,DS-5b80aff9-5093-47e6-b68d-7fda9ccb5d7b,DISK]) is bad. 
2024-12-07T13:25:18,365 WARN [Thread-964 {}] hdfs.DataStreamer(1850): Abandoning BP-959501690-172.17.0.3-1733577891118:blk_1073741878_1061 2024-12-07T13:25:18,366 WARN [Thread-964 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41375,DS-5b80aff9-5093-47e6-b68d-7fda9ccb5d7b,DISK] 2024-12-07T13:25:18,368 WARN [Thread-964 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741879_1062 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:40487 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T13:25:18,368 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1283404152_22 at /127.0.0.1:60182 [Receiving block BP-959501690-172.17.0.3-1733577891118:blk_1073741879_1062] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3a4fca0d-a22f-07a3-d286-965b16cc613d/cluster_3b40d539-567c-d4e1-b985-613e875fc041/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3a4fca0d-a22f-07a3-d286-965b16cc613d/cluster_3b40d539-567c-d4e1-b985-613e875fc041/data/data6]'}, localName='127.0.0.1:43971', datanodeUuid='75be824e-cc7b-46b3-a1bc-5cae5177bdc9', xmitsInProgress=0}:Exception transferring block BP-959501690-172.17.0.3-1733577891118:blk_1073741879_1062 to mirror 127.0.0.1:40487 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T13:25:18,368 WARN [Thread-964 {}] hdfs.DataStreamer(1731): Error Recovery for BP-959501690-172.17.0.3-1733577891118:blk_1073741879_1062 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43971,DS-317dae2a-1dd6-4115-b7da-799a141a3dbf,DISK], DatanodeInfoWithStorage[127.0.0.1:40487,DS-362498b2-c630-41dd-a062-e31650fa79e9,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:40487,DS-362498b2-c630-41dd-a062-e31650fa79e9,DISK]) is bad. 
2024-12-07T13:25:18,368 WARN [Thread-964 {}] hdfs.DataStreamer(1850): Abandoning BP-959501690-172.17.0.3-1733577891118:blk_1073741879_1062 2024-12-07T13:25:18,368 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1283404152_22 at /127.0.0.1:60182 [Receiving block BP-959501690-172.17.0.3-1733577891118:blk_1073741879_1062] {}] datanode.BlockReceiver(316): Block 1073741879 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-12-07T13:25:18,369 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1283404152_22 at /127.0.0.1:60182 [Receiving block BP-959501690-172.17.0.3-1733577891118:blk_1073741879_1062] {}] datanode.DataXceiver(331): 127.0.0.1:43971:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60182 dst: /127.0.0.1:43971 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
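The repeating pattern above ("Exception in createBlockOutputStream ... Connection refused", "Abandoning blk_...", "Excluding datanode ...") is the client-side half of pipeline recovery: each failed block is given up, the unreachable datanode is added to an exclusion list, and a fresh block is requested until either a working pipeline comes up or no eligible nodes remain. Below is a self-contained Java sketch of that exclude-and-retry loop under assumed names (requestNewPipeline, tryConnect, the dn-* labels); it is not the DataStreamer implementation.

    import java.util.*;

    public class PipelineRetrySketch {
        // Hypothetical stand-in for asking the namenode for a pipeline that avoids excluded nodes.
        static List<String> requestNewPipeline(List<String> all, Set<String> excluded, int width) {
            List<String> pipeline = new ArrayList<>();
            for (String node : all) {
                if (!excluded.contains(node)) pipeline.add(node);
                if (pipeline.size() == width) break;
            }
            return pipeline;
        }

        // Hypothetical connection attempt; stopped datanodes refuse the connection.
        static boolean tryConnect(String node, Set<String> stopped) {
            return !stopped.contains(node);
        }

        public static void main(String[] args) {
            List<String> all = List.of("dn-a", "dn-b", "dn-c", "dn-d", "dn-e", "dn-f");
            Set<String> stopped = Set.of("dn-a", "dn-b", "dn-c", "dn-d"); // most of the cluster is down
            Set<String> excluded = new HashSet<>();

            for (int attempt = 1; attempt <= all.size(); attempt++) {
                List<String> pipeline = requestNewPipeline(all, excluded, 2);
                if (pipeline.size() < 2) {
                    // Mirrors "Failed to place enough replicas, still in need of 1 to reach 2".
                    System.out.println("Failed to place enough replicas");
                    return;
                }
                Optional<String> bad = pipeline.stream().filter(n -> !tryConnect(n, stopped)).findFirst();
                if (bad.isEmpty()) {
                    System.out.println("Pipeline established: " + pipeline);
                    return;
                }
                // Mirrors the "Abandoning blk_..." and "Excluding datanode ..." pairs in the log.
                System.out.println("Attempt " + attempt + ": abandoning block, excluding " + bad.get());
                excluded.add(bad.get());
            }
        }
    }

In the test run the same loop eventually runs out of healthy nodes, which is when the namenode starts logging the "Failed to place enough replicas" warnings seen just below.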
2024-12-07T13:25:18,369 WARN [Thread-964 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40487,DS-362498b2-c630-41dd-a062-e31650fa79e9,DISK] 2024-12-07T13:25:18,370 WARN [IPC Server handler 1 on default port 35785 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-07T13:25:18,370 WARN [IPC Server handler 1 on default port 35785 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-07T13:25:18,370 WARN [IPC Server handler 1 on default port 35785 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-07T13:25:18,373 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43971 is added to blk_1073741880_1063 (size=6027) 2024-12-07T13:25:18,774 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=55 (bloomFilter=true), to=hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e6098b8ab71fc33e054cc7633f0ee629/.tmp/info/57e8ee5e0d194df3bbb99467ee4bc499 2024-12-07T13:25:18,784 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e6098b8ab71fc33e054cc7633f0ee629/.tmp/info/57e8ee5e0d194df3bbb99467ee4bc499 as hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e6098b8ab71fc33e054cc7633f0ee629/info/57e8ee5e0d194df3bbb99467ee4bc499 2024-12-07T13:25:18,790 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e6098b8ab71fc33e054cc7633f0ee629/info/57e8ee5e0d194df3bbb99467ee4bc499, entries=1, sequenceid=55, filesize=5.9 K 2024-12-07T13:25:18,791 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=2.10 KB/2150 for e6098b8ab71fc33e054cc7633f0ee629 in 444ms, sequenceid=55, compaction requested=true 2024-12-07T13:25:18,791 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for e6098b8ab71fc33e054cc7633f0ee629: 2024-12-07T13:25:18,792 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split 
because region size is big enough sumSize=29.3 K, sizeToCheck=16.0 K 2024-12-07T13:25:18,792 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-07T13:25:18,792 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e6098b8ab71fc33e054cc7633f0ee629/info/108ffd11cb734e7bb248b46beaec63b5 because midkey is the same as first or last row 2024-12-07T13:25:18,792 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e6098b8ab71fc33e054cc7633f0ee629:info, priority=-2147483648, current under compaction store size is 1 2024-12-07T13:25:18,792 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T13:25:18,792 DEBUG [RS:0;c7c455b68129:43699-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-07T13:25:18,793 DEBUG [RS:0;c7c455b68129:43699-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 30048 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-07T13:25:18,794 DEBUG [RS:0;c7c455b68129:43699-shortCompactions-0 {}] regionserver.HStore(1541): e6098b8ab71fc33e054cc7633f0ee629/info is initiating minor compaction (all files) 2024-12-07T13:25:18,794 INFO [RS:0;c7c455b68129:43699-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of e6098b8ab71fc33e054cc7633f0ee629/info in TestLogRolling-testLogRollOnDatanodeDeath,,1733577894867.e6098b8ab71fc33e054cc7633f0ee629. 
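The compaction trigger above selects all three eligible store files for a minor compaction (30048 bytes in total; the individual sizes are listed in the Compactor entries just below). A simplified version of that kind of selection keeps a candidate set only if no single file dwarfs the others, i.e. each file's size stays within some ratio of the combined size of the rest. The sketch below applies that rule with example sizes and an example ratio; it is not the ExploringCompactionPolicy code, only an illustration of the idea.

    import java.util.List;

    public class CompactionSelectionSketch {
        // Simplified ratio rule: every file must be at most ratio * (sum of the other selected files).
        static boolean inRatio(List<Long> sizes, double ratio) {
            long total = sizes.stream().mapToLong(Long::longValue).sum();
            return sizes.stream().allMatch(s -> s <= ratio * (total - s));
        }

        public static void main(String[] args) {
            // Roughly the three files from the log: 17.6 K, 5.9 K, 5.9 K (30048 bytes total).
            List<Long> storeFiles = List.of(18_023L, 6_027L, 5_998L);
            double ratio = 1.6; // example value for this sketch, not an HBase default

            // Try the widest window first and shrink it until the ratio rule holds.
            for (int start = 0; start < storeFiles.size() - 1; start++) {
                List<Long> candidate = storeFiles.subList(start, storeFiles.size());
                if (inRatio(candidate, ratio)) {
                    long total = candidate.stream().mapToLong(Long::longValue).sum();
                    System.out.println("Selected " + candidate.size() + " files of size " + total);
                    return;
                }
            }
            System.out.println("No selection in ratio");
        }
    }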
2024-12-07T13:25:18,794 INFO [RS:0;c7c455b68129:43699-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e6098b8ab71fc33e054cc7633f0ee629/info/108ffd11cb734e7bb248b46beaec63b5, hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e6098b8ab71fc33e054cc7633f0ee629/info/c4af9a650b1645038290fcd41826d118, hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e6098b8ab71fc33e054cc7633f0ee629/info/57e8ee5e0d194df3bbb99467ee4bc499] into tmpdir=hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e6098b8ab71fc33e054cc7633f0ee629/.tmp, totalSize=29.3 K 2024-12-07T13:25:18,795 DEBUG [RS:0;c7c455b68129:43699-shortCompactions-0 {}] compactions.Compactor(225): Compacting 108ffd11cb734e7bb248b46beaec63b5, keycount=12, bloomtype=ROW, size=17.6 K, encoding=NONE, compression=NONE, seqNum=34, earliestPutTs=1733577909339 2024-12-07T13:25:18,795 DEBUG [RS:0;c7c455b68129:43699-shortCompactions-0 {}] compactions.Compactor(225): Compacting c4af9a650b1645038290fcd41826d118, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=45, earliestPutTs=1733577916899 2024-12-07T13:25:18,796 DEBUG [RS:0;c7c455b68129:43699-shortCompactions-0 {}] compactions.Compactor(225): Compacting 57e8ee5e0d194df3bbb99467ee4bc499, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1733577918344 2024-12-07T13:25:18,815 INFO [RS:0;c7c455b68129:43699-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): e6098b8ab71fc33e054cc7633f0ee629#info#compaction#24 average throughput is 6.16 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-07T13:25:18,816 DEBUG [RS:0;c7c455b68129:43699-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e6098b8ab71fc33e054cc7633f0ee629/.tmp/info/492bd80a46254bb6a8d5afecfd56a89a is 1080, key is row0002/info:/1733577909339/Put/seqid=0 2024-12-07T13:25:18,817 WARN [Thread-970 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741881_1064 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
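The PressureAwareThroughputController entry above reports the compaction's average write rate against a 50.00 MB/second limit and how long the writer slept to stay under it. A generic way to get that behaviour is to compare the bytes written so far with the time they "should" have taken at the configured rate and sleep off the difference. The sketch below is a self-contained simplification with invented names, not the HBase controller.

    public class ThroughputLimiterSketch {
        private final double maxBytesPerSecond;
        private final long startNanos = System.nanoTime();
        private long bytesWritten = 0;
        private long totalSleptMillis = 0;

        ThroughputLimiterSketch(double maxBytesPerSecond) {
            this.maxBytesPerSecond = maxBytesPerSecond;
        }

        // Call after each write; sleeps just long enough to keep the average rate under the limit.
        void control(long bytes) throws InterruptedException {
            bytesWritten += bytes;
            double elapsedSeconds = (System.nanoTime() - startNanos) / 1e9;
            double minimumSeconds = bytesWritten / maxBytesPerSecond; // time this volume should take
            if (minimumSeconds > elapsedSeconds) {
                long sleepMillis = (long) ((minimumSeconds - elapsedSeconds) * 1000);
                totalSleptMillis += sleepMillis;
                Thread.sleep(sleepMillis);
            }
        }

        public static void main(String[] args) throws InterruptedException {
            ThroughputLimiterSketch limiter = new ThroughputLimiterSketch(50 * 1024 * 1024); // 50 MB/s
            for (int i = 0; i < 100; i++) {
                // Pretend to write 1 MB of compacted output, then let the limiter pace us.
                limiter.control(1024 * 1024);
            }
            System.out.println("Slept " + limiter.totalSleptMillis + " ms in total");
        }
    }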
2024-12-07T13:25:18,817 WARN [Thread-970 {}] hdfs.DataStreamer(1731): Error Recovery for BP-959501690-172.17.0.3-1733577891118:blk_1073741881_1064 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40395,DS-bc541ee7-02ba-4fbd-8aa8-6b25f00a10bd,DISK], DatanodeInfoWithStorage[127.0.0.1:43971,DS-317dae2a-1dd6-4115-b7da-799a141a3dbf,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40395,DS-bc541ee7-02ba-4fbd-8aa8-6b25f00a10bd,DISK]) is bad. 2024-12-07T13:25:18,817 WARN [Thread-970 {}] hdfs.DataStreamer(1850): Abandoning BP-959501690-172.17.0.3-1733577891118:blk_1073741881_1064 2024-12-07T13:25:18,818 WARN [Thread-970 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40395,DS-bc541ee7-02ba-4fbd-8aa8-6b25f00a10bd,DISK] 2024-12-07T13:25:18,819 WARN [Thread-970 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741882_1065 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T13:25:18,819 WARN [Thread-970 {}] hdfs.DataStreamer(1731): Error Recovery for BP-959501690-172.17.0.3-1733577891118:blk_1073741882_1065 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40487,DS-362498b2-c630-41dd-a062-e31650fa79e9,DISK], DatanodeInfoWithStorage[127.0.0.1:41655,DS-ce9ff6f4-6fc6-4a51-89c6-edb968514e60,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40487,DS-362498b2-c630-41dd-a062-e31650fa79e9,DISK]) is bad. 2024-12-07T13:25:18,819 WARN [Thread-970 {}] hdfs.DataStreamer(1850): Abandoning BP-959501690-172.17.0.3-1733577891118:blk_1073741882_1065 2024-12-07T13:25:18,820 WARN [Thread-970 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40487,DS-362498b2-c630-41dd-a062-e31650fa79e9,DISK] 2024-12-07T13:25:18,821 WARN [Thread-970 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741883_1066 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T13:25:18,821 WARN [Thread-970 {}] hdfs.DataStreamer(1731): Error Recovery for BP-959501690-172.17.0.3-1733577891118:blk_1073741883_1066 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41375,DS-5b80aff9-5093-47e6-b68d-7fda9ccb5d7b,DISK], DatanodeInfoWithStorage[127.0.0.1:43971,DS-317dae2a-1dd6-4115-b7da-799a141a3dbf,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41375,DS-5b80aff9-5093-47e6-b68d-7fda9ccb5d7b,DISK]) is bad. 2024-12-07T13:25:18,821 WARN [Thread-970 {}] hdfs.DataStreamer(1850): Abandoning BP-959501690-172.17.0.3-1733577891118:blk_1073741883_1066 2024-12-07T13:25:18,821 WARN [Thread-970 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41375,DS-5b80aff9-5093-47e6-b68d-7fda9ccb5d7b,DISK] 2024-12-07T13:25:18,822 WARN [Thread-970 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741884_1067 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T13:25:18,822 WARN [Thread-970 {}] hdfs.DataStreamer(1731): Error Recovery for BP-959501690-172.17.0.3-1733577891118:blk_1073741884_1067 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41655,DS-ce9ff6f4-6fc6-4a51-89c6-edb968514e60,DISK], DatanodeInfoWithStorage[127.0.0.1:43971,DS-317dae2a-1dd6-4115-b7da-799a141a3dbf,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41655,DS-ce9ff6f4-6fc6-4a51-89c6-edb968514e60,DISK]) is bad. 
2024-12-07T13:25:18,823 WARN [Thread-970 {}] hdfs.DataStreamer(1850): Abandoning BP-959501690-172.17.0.3-1733577891118:blk_1073741884_1067 2024-12-07T13:25:18,823 WARN [Thread-970 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41655,DS-ce9ff6f4-6fc6-4a51-89c6-edb968514e60,DISK] 2024-12-07T13:25:18,824 WARN [IPC Server handler 1 on default port 35785 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-07T13:25:18,824 WARN [IPC Server handler 1 on default port 35785 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-07T13:25:18,824 WARN [IPC Server handler 1 on default port 35785 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-07T13:25:18,827 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43971 is added to blk_1073741885_1068 (size=18097) 2024-12-07T13:25:18,840 INFO [regionserver/c7c455b68129:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41375,DS-5b80aff9-5093-47e6-b68d-7fda9ccb5d7b,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T13:25:18,984 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@2c7d538b[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:43971, datanodeUuid=75be824e-cc7b-46b3-a1bc-5cae5177bdc9, infoPort=34539, infoSecurePort=0, ipcPort=43667, storageInfo=lv=-57;cid=testClusterID;nsid=2132482547;c=1733577891118):Failed to transfer BP-959501690-172.17.0.3-1733577891118:blk_1073741865_1048 to 127.0.0.1:40395 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T13:25:18,984 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@22bab874[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:43971, datanodeUuid=75be824e-cc7b-46b3-a1bc-5cae5177bdc9, infoPort=34539, infoSecurePort=0, ipcPort=43667, storageInfo=lv=-57;cid=testClusterID;nsid=2132482547;c=1733577891118):Failed to transfer BP-959501690-172.17.0.3-1733577891118:blk_1073741870_1053 to 127.0.0.1:40395 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T13:25:19,242 DEBUG [RS:0;c7c455b68129:43699-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e6098b8ab71fc33e054cc7633f0ee629/.tmp/info/492bd80a46254bb6a8d5afecfd56a89a as hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e6098b8ab71fc33e054cc7633f0ee629/info/492bd80a46254bb6a8d5afecfd56a89a 2024-12-07T13:25:19,252 INFO [RS:0;c7c455b68129:43699-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in e6098b8ab71fc33e054cc7633f0ee629/info of e6098b8ab71fc33e054cc7633f0ee629 into 492bd80a46254bb6a8d5afecfd56a89a(size=17.7 K), total size for store is 17.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
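Both the flush and the compaction above build their output under the region's .tmp directory and only then "commit" it by moving the finished file into the store directory (the HRegionFileSystem(442) entries), so readers never observe a half-written HFile. The same write-to-temp-then-rename pattern in plain Java NIO, with local paths standing in for HDFS, looks like this (HDFS rename semantics differ; this only illustrates the shape of the pattern):

    import java.io.IOException;
    import java.nio.file.Files;
    import java.nio.file.Path;
    import java.nio.file.StandardCopyOption;

    public class TmpCommitSketch {
        public static void main(String[] args) throws IOException {
            Path storeDir = Files.createTempDirectory("info");            // stand-in for .../e6098.../info
            Path tmpDir = Files.createDirectories(storeDir.resolve(".tmp"));

            // 1. Write the new file under .tmp so listings of the store directory never show it half-written.
            Path tmpFile = tmpDir.resolve("492bd80a46254bb6a8d5afecfd56a89a");
            Files.write(tmpFile, "compacted hfile bytes would go here".getBytes());

            // 2. Commit: move it into the store directory in a single step.
            Path committed = storeDir.resolve(tmpFile.getFileName());
            Files.move(tmpFile, committed, StandardCopyOption.ATOMIC_MOVE);

            System.out.println("Committed " + committed);
        }
    }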
2024-12-07T13:25:19,253 DEBUG [RS:0;c7c455b68129:43699-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for e6098b8ab71fc33e054cc7633f0ee629: 2024-12-07T13:25:19,253 INFO [RS:0;c7c455b68129:43699-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRollOnDatanodeDeath,,1733577894867.e6098b8ab71fc33e054cc7633f0ee629., storeName=e6098b8ab71fc33e054cc7633f0ee629/info, priority=13, startTime=1733577918792; duration=0sec 2024-12-07T13:25:19,253 DEBUG [RS:0;c7c455b68129:43699-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.7 K, sizeToCheck=16.0 K 2024-12-07T13:25:19,253 DEBUG [RS:0;c7c455b68129:43699-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-07T13:25:19,253 DEBUG [RS:0;c7c455b68129:43699-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e6098b8ab71fc33e054cc7633f0ee629/info/492bd80a46254bb6a8d5afecfd56a89a because midkey is the same as first or last row 2024-12-07T13:25:19,253 DEBUG [RS:0;c7c455b68129:43699-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.7 K, sizeToCheck=16.0 K 2024-12-07T13:25:19,253 DEBUG [RS:0;c7c455b68129:43699-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-07T13:25:19,253 DEBUG [RS:0;c7c455b68129:43699-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e6098b8ab71fc33e054cc7633f0ee629/info/492bd80a46254bb6a8d5afecfd56a89a because midkey is the same as first or last row 2024-12-07T13:25:19,253 DEBUG [RS:0;c7c455b68129:43699-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.7 K, sizeToCheck=16.0 K 2024-12-07T13:25:19,253 DEBUG [RS:0;c7c455b68129:43699-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-07T13:25:19,254 DEBUG [RS:0;c7c455b68129:43699-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e6098b8ab71fc33e054cc7633f0ee629/info/492bd80a46254bb6a8d5afecfd56a89a because midkey is the same as first or last row 2024-12-07T13:25:19,254 DEBUG [RS:0;c7c455b68129:43699-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T13:25:19,254 DEBUG [RS:0;c7c455b68129:43699-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e6098b8ab71fc33e054cc7633f0ee629:info 2024-12-07T13:25:19,343 INFO [regionserver/c7c455b68129:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41375,DS-5b80aff9-5093-47e6-b68d-7fda9ccb5d7b,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T13:25:19,344 WARN [regionserver/c7c455b68129:0.logRoller {}] wal.FSHLog(539): Too many consecutive RollWriter requests, it's a sign of the total number of live datanodes is lower than the tolerable replicas. 2024-12-07T13:25:19,389 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-07T13:25:19,392 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-07T13:25:19,392 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-07T13:25:19,392 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-07T13:25:19,392 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-07T13:25:19,393 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@366bb257{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3a4fca0d-a22f-07a3-d286-965b16cc613d/hadoop.log.dir/,AVAILABLE} 2024-12-07T13:25:19,393 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@16711f95{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-07T13:25:19,484 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@34e71a9{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3a4fca0d-a22f-07a3-d286-965b16cc613d/java.io.tmpdir/jetty-localhost-41299-hadoop-hdfs-3_4_1-tests_jar-_-any-3831854398692696664/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-07T13:25:19,484 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3f0b77b3{HTTP/1.1, (http/1.1)}{localhost:41299} 2024-12-07T13:25:19,484 INFO [Time-limited test {}] server.Server(415): Started @137324ms 2024-12-07T13:25:19,485 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-07T13:25:19,596 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41375,DS-5b80aff9-5093-47e6-b68d-7fda9ccb5d7b,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T13:25:19,969 WARN [Thread-988 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-07T13:25:19,976 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x91b576ef9e7adba2 with lease ID 0xea89a762f7bb534a: from storage DS-bc541ee7-02ba-4fbd-8aa8-6b25f00a10bd node DatanodeRegistration(127.0.0.1:39707, datanodeUuid=bafc61d8-ce26-4093-91ed-a19abb4f3d38, infoPort=39547, infoSecurePort=0, ipcPort=39125, storageInfo=lv=-57;cid=testClusterID;nsid=2132482547;c=1733577891118), blocks: 7, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-07T13:25:19,976 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x91b576ef9e7adba2 with lease ID 0xea89a762f7bb534a: from storage DS-9e930cb4-428e-4e08-9621-be88807a9acf node DatanodeRegistration(127.0.0.1:39707, datanodeUuid=bafc61d8-ce26-4093-91ed-a19abb4f3d38, infoPort=39547, infoSecurePort=0, ipcPort=39125, storageInfo=lv=-57;cid=testClusterID;nsid=2132482547;c=1733577891118), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-07T13:25:19,984 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@22bab874[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:43971, datanodeUuid=75be824e-cc7b-46b3-a1bc-5cae5177bdc9, infoPort=34539, infoSecurePort=0, ipcPort=43667, storageInfo=lv=-57;cid=testClusterID;nsid=2132482547;c=1733577891118):Failed to transfer BP-959501690-172.17.0.3-1733577891118:blk_1073741855_1038 to 127.0.0.1:41375 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
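From 13:25:18 onward the region server's log roller and the master's store-WAL roller print "All datanodes [...] are bad. Aborting..." every second or so, and FSHLog(539) warns that too many consecutive RollWriter requests suggest fewer live datanodes than the tolerable replica count. That guard amounts to counting consecutive failed roll attempts and flagging the situation once a threshold is crossed; the sketch below shows the shape of it with invented names, a fake roll step, and an arbitrary threshold, and is not the FSHLog implementation.

    public class RollRetrySketch {
        // Hypothetical roll step: succeeds only once enough datanodes are available again.
        static boolean tryRollWriter(int liveDatanodes, int requiredReplicas) {
            return liveDatanodes >= requiredReplicas;
        }

        public static void main(String[] args) throws InterruptedException {
            int requiredReplicas = 2;
            int liveDatanodes = 1;           // pretend only one datanode survived
            int consecutiveFailures = 0;
            int warnThreshold = 5;           // arbitrary threshold for this sketch

            while (!tryRollWriter(liveDatanodes, requiredReplicas)) {
                consecutiveFailures++;
                System.out.println("Roll attempt " + consecutiveFailures + " failed: all datanodes are bad");
                if (consecutiveFailures >= warnThreshold) {
                    System.out.println("Too many consecutive RollWriter requests; "
                            + "live datanodes are probably below the tolerable replicas");
                    liveDatanodes = 2;       // in the test a replacement datanode eventually registers
                }
                Thread.sleep(100);           // short pause between retries
            }
            System.out.println("Rolled WAL successfully");
        }
    }

In this run the master's store WAL roll finally goes through at 13:25:23 using the re-registered datanode (see the BlockManager processReport entries above and the "Rolled WAL" entry further down), while the region server's roller keeps retrying.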
2024-12-07T13:25:19,984 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@2c7d538b[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:43971, datanodeUuid=75be824e-cc7b-46b3-a1bc-5cae5177bdc9, infoPort=34539, infoSecurePort=0, ipcPort=43667, storageInfo=lv=-57;cid=testClusterID;nsid=2132482547;c=1733577891118):Failed to transfer BP-959501690-172.17.0.3-1733577891118:blk_1073741880_1063 to 127.0.0.1:41375 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T13:25:20,840 INFO [regionserver/c7c455b68129:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41375,DS-5b80aff9-5093-47e6-b68d-7fda9ccb5d7b,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T13:25:21,344 INFO [regionserver/c7c455b68129:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41375,DS-5b80aff9-5093-47e6-b68d-7fda9ccb5d7b,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T13:25:21,597 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41375,DS-5b80aff9-5093-47e6-b68d-7fda9ccb5d7b,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T13:25:21,987 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@22bab874[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:43971, datanodeUuid=75be824e-cc7b-46b3-a1bc-5cae5177bdc9, infoPort=34539, infoSecurePort=0, ipcPort=43667, storageInfo=lv=-57;cid=testClusterID;nsid=2132482547;c=1733577891118):Failed to transfer BP-959501690-172.17.0.3-1733577891118:blk_1073741885_1068 to 127.0.0.1:40487 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T13:25:22,841 INFO [regionserver/c7c455b68129:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41375,DS-5b80aff9-5093-47e6-b68d-7fda9ccb5d7b,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T13:25:23,262 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-07T13:25:23,345 INFO [regionserver/c7c455b68129:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41375,DS-5b80aff9-5093-47e6-b68d-7fda9ccb5d7b,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T13:25:23,597 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41375,DS-5b80aff9-5093-47e6-b68d-7fda9ccb5d7b,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T13:25:23,858 ERROR [FSHLog-0-hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/MasterData-prefix:c7c455b68129,46593,1733577893287 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41375,DS-5b80aff9-5093-47e6-b68d-7fda9ccb5d7b,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T13:25:23,858 WARN [FSHLog-0-hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/MasterData-prefix:c7c455b68129,46593,1733577893287 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41375,DS-5b80aff9-5093-47e6-b68d-7fda9ccb5d7b,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T13:25:23,859 DEBUG [master:store-WAL-Roller {}] wal.AbstractWALRoller(198): WAL FSHLog c7c455b68129%2C46593%2C1733577893287:(num 1733577893614) roll requested 2024-12-07T13:25:23,860 INFO [master:store-WAL-Roller {}] monitor.StreamSlowMonitor(122): New stream slow monitor c7c455b68129%2C46593%2C1733577893287.1733577923859 2024-12-07T13:25:23,864 WARN [Thread-1008 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741886_1069 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T13:25:23,864 WARN [Thread-1008 {}] hdfs.DataStreamer(1731): Error Recovery for BP-959501690-172.17.0.3-1733577891118:blk_1073741886_1069 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41655,DS-ce9ff6f4-6fc6-4a51-89c6-edb968514e60,DISK], DatanodeInfoWithStorage[127.0.0.1:43971,DS-317dae2a-1dd6-4115-b7da-799a141a3dbf,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41655,DS-ce9ff6f4-6fc6-4a51-89c6-edb968514e60,DISK]) is bad. 2024-12-07T13:25:23,864 WARN [Thread-1008 {}] hdfs.DataStreamer(1850): Abandoning BP-959501690-172.17.0.3-1733577891118:blk_1073741886_1069 2024-12-07T13:25:23,865 WARN [Thread-1008 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41655,DS-ce9ff6f4-6fc6-4a51-89c6-edb968514e60,DISK] 2024-12-07T13:25:23,866 WARN [Thread-1008 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741887_1070 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T13:25:23,867 WARN [Thread-1008 {}] hdfs.DataStreamer(1731): Error Recovery for BP-959501690-172.17.0.3-1733577891118:blk_1073741887_1070 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41375,DS-5b80aff9-5093-47e6-b68d-7fda9ccb5d7b,DISK], DatanodeInfoWithStorage[127.0.0.1:39707,DS-bc541ee7-02ba-4fbd-8aa8-6b25f00a10bd,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41375,DS-5b80aff9-5093-47e6-b68d-7fda9ccb5d7b,DISK]) is bad. 2024-12-07T13:25:23,867 WARN [Thread-1008 {}] hdfs.DataStreamer(1850): Abandoning BP-959501690-172.17.0.3-1733577891118:blk_1073741887_1070 2024-12-07T13:25:23,867 WARN [Thread-1008 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41375,DS-5b80aff9-5093-47e6-b68d-7fda9ccb5d7b,DISK] 2024-12-07T13:25:23,869 WARN [Thread-1008 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741888_1071 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] 
at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T13:25:23,869 WARN [Thread-1008 {}] hdfs.DataStreamer(1731): Error Recovery for BP-959501690-172.17.0.3-1733577891118:blk_1073741888_1071 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40487,DS-362498b2-c630-41dd-a062-e31650fa79e9,DISK], DatanodeInfoWithStorage[127.0.0.1:43971,DS-317dae2a-1dd6-4115-b7da-799a141a3dbf,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40487,DS-362498b2-c630-41dd-a062-e31650fa79e9,DISK]) is bad. 2024-12-07T13:25:23,869 WARN [Thread-1008 {}] hdfs.DataStreamer(1850): Abandoning BP-959501690-172.17.0.3-1733577891118:blk_1073741888_1071 2024-12-07T13:25:23,870 WARN [Thread-1008 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40487,DS-362498b2-c630-41dd-a062-e31650fa79e9,DISK] 2024-12-07T13:25:23,873 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:25:23,873 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:25:23,873 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:25:23,874 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:25:23,874 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:25:23,874 INFO [master:store-WAL-Roller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/MasterData/WALs/c7c455b68129,46593,1733577893287/c7c455b68129%2C46593%2C1733577893287.1733577893614 with entries=54, filesize=26.65 KB; new WAL /user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/MasterData/WALs/c7c455b68129,46593,1733577893287/c7c455b68129%2C46593%2C1733577893287.1733577923859 2024-12-07T13:25:23,874 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41375,DS-5b80aff9-5093-47e6-b68d-7fda9ccb5d7b,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T13:25:23,874 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41375,DS-5b80aff9-5093-47e6-b68d-7fda9ccb5d7b,DISK]] are bad. Aborting... 
at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T13:25:23,875 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/MasterData/WALs/c7c455b68129,46593,1733577893287/c7c455b68129%2C46593%2C1733577893287.1733577893614 2024-12-07T13:25:23,875 WARN [IPC Server handler 1 on default port 35785 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/MasterData/WALs/c7c455b68129,46593,1733577893287/c7c455b68129%2C46593%2C1733577893287.1733577893614 has not been closed. Lease recovery is in progress. RecoveryId = 1073 for block blk_1073741830_1006 2024-12-07T13:25:23,875 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/MasterData/WALs/c7c455b68129,46593,1733577893287/c7c455b68129%2C46593%2C1733577893287.1733577893614 after 0ms 2024-12-07T13:25:23,877 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39547:39547),(127.0.0.1/127.0.0.1:34539:34539)] 2024-12-07T13:25:23,877 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(879): hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/MasterData/WALs/c7c455b68129,46593,1733577893287/c7c455b68129%2C46593%2C1733577893287.1733577893614 is not closed yet, will try archiving it next time 2024-12-07T13:25:24,842 INFO [regionserver/c7c455b68129:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41375,DS-5b80aff9-5093-47e6-b68d-7fda9ccb5d7b,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T13:25:25,346 INFO [regionserver/c7c455b68129:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41375,DS-5b80aff9-5093-47e6-b68d-7fda9ccb5d7b,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T13:25:26,842 INFO [regionserver/c7c455b68129:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41375,DS-5b80aff9-5093-47e6-b68d-7fda9ccb5d7b,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T13:25:27,346 INFO [regionserver/c7c455b68129:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41375,DS-5b80aff9-5093-47e6-b68d-7fda9ccb5d7b,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T13:25:27,878 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/MasterData/WALs/c7c455b68129,46593,1733577893287/c7c455b68129%2C46593%2C1733577893287.1733577893614 after 4003ms 2024-12-07T13:25:28,843 INFO [regionserver/c7c455b68129:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41375,DS-5b80aff9-5093-47e6-b68d-7fda9ccb5d7b,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T13:25:29,347 INFO [regionserver/c7c455b68129:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41375,DS-5b80aff9-5093-47e6-b68d-7fda9ccb5d7b,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T13:25:29,990 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@7a838b51 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-959501690-172.17.0.3-1733577891118:blk_1073741833_1009, datanode=DatanodeInfoWithStorage[127.0.0.1:41375,null,null]) java.net.ConnectException: Call From c7c455b68129/172.17.0.3 to localhost:33741 failed on connection exception: java.net.ConnectException: Connection refused; For more details see: http://wiki.apache.org/hadoop/ConnectionRefused at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:876) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:668) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more 2024-12-07T13:25:29,998 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39707 is added to blk_1073741833_1019 (size=455) 2024-12-07T13:25:30,317 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,43699,1733577893481/c7c455b68129%2C43699%2C1733577893481.1733577894113 to hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/oldWALs/c7c455b68129%2C43699%2C1733577893481.1733577894113 2024-12-07T13:25:30,319 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,43699,1733577893481/c7c455b68129%2C43699%2C1733577893481.1733577915303 to hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/oldWALs/c7c455b68129%2C43699%2C1733577893481.1733577915303 2024-12-07T13:25:30,843 INFO [regionserver/c7c455b68129:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41375,DS-5b80aff9-5093-47e6-b68d-7fda9ccb5d7b,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T13:25:30,977 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@4bb31557[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:39707, datanodeUuid=bafc61d8-ce26-4093-91ed-a19abb4f3d38, infoPort=39547, infoSecurePort=0, ipcPort=39125, storageInfo=lv=-57;cid=testClusterID;nsid=2132482547;c=1733577891118):Failed to transfer BP-959501690-172.17.0.3-1733577891118:blk_1073741833_1019 to 127.0.0.1:41375 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T13:25:31,347 INFO [regionserver/c7c455b68129:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41375,DS-5b80aff9-5093-47e6-b68d-7fda9ccb5d7b,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T13:25:32,844 INFO [regionserver/c7c455b68129:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41375,DS-5b80aff9-5093-47e6-b68d-7fda9ccb5d7b,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-07T13:25:33,175 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor c7c455b68129%2C43699%2C1733577893481.1733577933175 2024-12-07T13:25:33,185 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:25:33,185 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:25:33,185 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:25:33,185 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:25:33,185 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:25:33,186 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,43699,1733577893481/c7c455b68129%2C43699%2C1733577893481.1733577917324 with entries=13, filesize=11.81 KB; new WAL /user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,43699,1733577893481/c7c455b68129%2C43699%2C1733577893481.1733577933175 2024-12-07T13:25:33,187 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34539:34539),(127.0.0.1/127.0.0.1:39547:39547)] 2024-12-07T13:25:33,187 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,43699,1733577893481/c7c455b68129%2C43699%2C1733577893481.1733577917324 is not closed yet, will try archiving it next time 2024-12-07T13:25:33,188 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43971 is added to blk_1073741875_1058 (size=12100) 2024-12-07T13:25:33,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43699 {}] regionserver.HRegion(8855): Flush requested on e6098b8ab71fc33e054cc7633f0ee629 2024-12-07T13:25:33,193 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing e6098b8ab71fc33e054cc7633f0ee629 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-12-07T13:25:33,198 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e6098b8ab71fc33e054cc7633f0ee629/.tmp/info/f59025a9068045638253258e4ded1565 is 1080, key is row0013/info:/1733577933188/Put/seqid=0 2024-12-07T13:25:33,201 WARN [Thread-1024 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741891_1075 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:41655 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-07T13:25:33,201 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1283404152_22 at /127.0.0.1:41352 [Receiving block BP-959501690-172.17.0.3-1733577891118:blk_1073741891_1075] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3a4fca0d-a22f-07a3-d286-965b16cc613d/cluster_3b40d539-567c-d4e1-b985-613e875fc041/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3a4fca0d-a22f-07a3-d286-965b16cc613d/cluster_3b40d539-567c-d4e1-b985-613e875fc041/data/data6]'}, localName='127.0.0.1:43971', datanodeUuid='75be824e-cc7b-46b3-a1bc-5cae5177bdc9', xmitsInProgress=0}:Exception transferring block BP-959501690-172.17.0.3-1733577891118:blk_1073741891_1075 to mirror 127.0.0.1:41655 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T13:25:33,202 WARN [Thread-1024 {}] hdfs.DataStreamer(1731): Error Recovery for BP-959501690-172.17.0.3-1733577891118:blk_1073741891_1075 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43971,DS-317dae2a-1dd6-4115-b7da-799a141a3dbf,DISK], DatanodeInfoWithStorage[127.0.0.1:41655,DS-ce9ff6f4-6fc6-4a51-89c6-edb968514e60,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:41655,DS-ce9ff6f4-6fc6-4a51-89c6-edb968514e60,DISK]) is bad. 2024-12-07T13:25:33,202 WARN [Thread-1024 {}] hdfs.DataStreamer(1850): Abandoning BP-959501690-172.17.0.3-1733577891118:blk_1073741891_1075 2024-12-07T13:25:33,202 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1283404152_22 at /127.0.0.1:41352 [Receiving block BP-959501690-172.17.0.3-1733577891118:blk_1073741891_1075] {}] datanode.BlockReceiver(316): Block 1073741891 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-12-07T13:25:33,202 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1283404152_22 at /127.0.0.1:41352 [Receiving block BP-959501690-172.17.0.3-1733577891118:blk_1073741891_1075] {}] datanode.DataXceiver(331): 127.0.0.1:43971:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41352 dst: /127.0.0.1:43971 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T13:25:33,203 WARN [Thread-1024 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41655,DS-ce9ff6f4-6fc6-4a51-89c6-edb968514e60,DISK] 2024-12-07T13:25:33,212 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43971 is added to blk_1073741892_1076 (size=9267) 2024-12-07T13:25:33,213 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39707 is added to blk_1073741892_1076 (size=9267) 2024-12-07T13:25:33,213 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=66 (bloomFilter=true), to=hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e6098b8ab71fc33e054cc7633f0ee629/.tmp/info/f59025a9068045638253258e4ded1565 2024-12-07T13:25:33,220 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e6098b8ab71fc33e054cc7633f0ee629/.tmp/info/f59025a9068045638253258e4ded1565 as hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e6098b8ab71fc33e054cc7633f0ee629/info/f59025a9068045638253258e4ded1565 2024-12-07T13:25:33,226 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e6098b8ab71fc33e054cc7633f0ee629/info/f59025a9068045638253258e4ded1565, entries=4, sequenceid=66, filesize=9.0 K 2024-12-07T13:25:33,227 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7528, heapSize ~8.11 KB/8304, currentSize=8.41 KB/8607 for e6098b8ab71fc33e054cc7633f0ee629 in 34ms, sequenceid=66, compaction requested=false 2024-12-07T13:25:33,227 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for e6098b8ab71fc33e054cc7633f0ee629: 2024-12-07T13:25:33,227 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=26.7 K, sizeToCheck=16.0 K 2024-12-07T13:25:33,227 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-07T13:25:33,227 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e6098b8ab71fc33e054cc7633f0ee629/info/492bd80a46254bb6a8d5afecfd56a89a because midkey is the same as first or last row 2024-12-07T13:25:33,348 INFO [regionserver/c7c455b68129:0.logRoller {}] wal.FSHLog(580): 
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41375,DS-5b80aff9-5093-47e6-b68d-7fda9ccb5d7b,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T13:25:33,348 INFO [regionserver/c7c455b68129:0.logRoller {}] wal.FSHLog(556): LowReplication-Roller was enabled. 2024-12-07T13:25:33,412 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-12-07T13:25:33,412 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-12-07T13:25:33,412 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at 
org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-07T13:25:33,412 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T13:25:33,412 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T13:25:33,412 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-07T13:25:33,413 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-07T13:25:33,413 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1858445601, stopped=false 2024-12-07T13:25:33,413 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=c7c455b68129,46593,1733577893287 2024-12-07T13:25:33,473 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43699-0x10000736c570001, quorum=127.0.0.1:55405, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-07T13:25:33,473 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46593-0x10000736c570000, quorum=127.0.0.1:55405, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-07T13:25:33,473 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42863-0x10000736c570002, quorum=127.0.0.1:55405, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-07T13:25:33,473 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43699-0x10000736c570001, quorum=127.0.0.1:55405, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T13:25:33,473 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42863-0x10000736c570002, quorum=127.0.0.1:55405, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T13:25:33,473 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46593-0x10000736c570000, quorum=127.0.0.1:55405, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T13:25:33,473 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-07T13:25:33,474 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-12-07T13:25:33,475 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-07T13:25:33,475 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T13:25:33,476 DEBUG [zk-event-processor-pool-0 {}] 
zookeeper.ZKUtil(113): regionserver:42863-0x10000736c570002, quorum=127.0.0.1:55405, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-07T13:25:33,476 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:43699-0x10000736c570001, quorum=127.0.0.1:55405, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-07T13:25:33,476 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'c7c455b68129,43699,1733577893481' ***** 2024-12-07T13:25:33,476 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-07T13:25:33,476 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'c7c455b68129,42863,1733577894737' ***** 2024-12-07T13:25:33,476 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:46593-0x10000736c570000, quorum=127.0.0.1:55405, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-07T13:25:33,476 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-07T13:25:33,476 INFO [RS:0;c7c455b68129:43699 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-07T13:25:33,476 INFO [RS:1;c7c455b68129:42863 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-07T13:25:33,477 INFO [RS:0;c7c455b68129:43699 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-07T13:25:33,477 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-07T13:25:33,477 INFO [RS:1;c7c455b68129:42863 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-07T13:25:33,477 INFO [RS:1;c7c455b68129:42863 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-07T13:25:33,477 INFO [RS:0;c7c455b68129:43699 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-07T13:25:33,477 INFO [RS:1;c7c455b68129:42863 {}] regionserver.HRegionServer(959): stopping server c7c455b68129,42863,1733577894737 2024-12-07T13:25:33,477 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-07T13:25:33,477 INFO [RS:1;c7c455b68129:42863 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-07T13:25:33,477 INFO [RS:0;c7c455b68129:43699 {}] regionserver.HRegionServer(3091): Received CLOSE for e6098b8ab71fc33e054cc7633f0ee629 2024-12-07T13:25:33,477 INFO [RS:1;c7c455b68129:42863 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;c7c455b68129:42863. 
2024-12-07T13:25:33,478 DEBUG [RS:1;c7c455b68129:42863 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-07T13:25:33,478 DEBUG [RS:1;c7c455b68129:42863 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T13:25:33,478 INFO [RS:0;c7c455b68129:43699 {}] regionserver.HRegionServer(959): stopping server c7c455b68129,43699,1733577893481 2024-12-07T13:25:33,478 INFO [RS:0;c7c455b68129:43699 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-07T13:25:33,478 INFO [RS:1;c7c455b68129:42863 {}] regionserver.HRegionServer(976): stopping server c7c455b68129,42863,1733577894737; all regions closed. 2024-12-07T13:25:33,478 INFO [RS:0;c7c455b68129:43699 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;c7c455b68129:43699. 
2024-12-07T13:25:33,478 DEBUG [RS:0;c7c455b68129:43699 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-07T13:25:33,478 DEBUG [RS:0;c7c455b68129:43699 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T13:25:33,478 DEBUG [RS_CLOSE_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing e6098b8ab71fc33e054cc7633f0ee629, disabling compactions & flushes 2024-12-07T13:25:33,478 INFO [RS:0;c7c455b68129:43699 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-07T13:25:33,478 INFO [RS_CLOSE_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnDatanodeDeath,,1733577894867.e6098b8ab71fc33e054cc7633f0ee629. 2024-12-07T13:25:33,478 INFO [RS:0;c7c455b68129:43699 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-07T13:25:33,478 INFO [RS:0;c7c455b68129:43699 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-07T13:25:33,478 DEBUG [RS_CLOSE_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1733577894867.e6098b8ab71fc33e054cc7633f0ee629. 2024-12-07T13:25:33,479 INFO [RS:0;c7c455b68129:43699 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-07T13:25:33,479 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:25:33,479 DEBUG [RS_CLOSE_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1733577894867.e6098b8ab71fc33e054cc7633f0ee629. after waiting 0 ms 2024-12-07T13:25:33,479 DEBUG [RS_CLOSE_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnDatanodeDeath,,1733577894867.e6098b8ab71fc33e054cc7633f0ee629. 
2024-12-07T13:25:33,479 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:25:33,479 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:25:33,479 INFO [RS:0;c7c455b68129:43699 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-12-07T13:25:33,479 INFO [RS_CLOSE_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing e6098b8ab71fc33e054cc7633f0ee629 1/1 column families, dataSize=8.41 KB heapSize=9.25 KB 2024-12-07T13:25:33,479 DEBUG [RS:0;c7c455b68129:43699 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, e6098b8ab71fc33e054cc7633f0ee629=TestLogRolling-testLogRollOnDatanodeDeath,,1733577894867.e6098b8ab71fc33e054cc7633f0ee629.} 2024-12-07T13:25:33,479 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:25:33,479 DEBUG [RS:0;c7c455b68129:43699 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, e6098b8ab71fc33e054cc7633f0ee629 2024-12-07T13:25:33,479 DEBUG [RS_CLOSE_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-07T13:25:33,479 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:25:33,479 INFO [RS_CLOSE_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-07T13:25:33,479 DEBUG [RS_CLOSE_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-07T13:25:33,479 DEBUG [RS_CLOSE_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-07T13:25:33,479 DEBUG [RS_CLOSE_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-07T13:25:33,479 INFO [RS_CLOSE_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.71 KB heapSize=3.75 KB 2024-12-07T13:25:33,479 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41375,DS-5b80aff9-5093-47e6-b68d-7fda9ccb5d7b,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T13:25:33,480 ERROR [FSHLog-0-hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084-prefix:c7c455b68129,43699,1733577893481.meta {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41375,DS-5b80aff9-5093-47e6-b68d-7fda9ccb5d7b,DISK]] are bad. Aborting... 
at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T13:25:33,480 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41375,DS-5b80aff9-5093-47e6-b68d-7fda9ccb5d7b,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T13:25:33,480 WARN [FSHLog-0-hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084-prefix:c7c455b68129,43699,1733577893481.meta {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41375,DS-5b80aff9-5093-47e6-b68d-7fda9ccb5d7b,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T13:25:33,480 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,42863,1733577894737/c7c455b68129%2C42863%2C1733577894737.1733577894972 2024-12-07T13:25:33,480 DEBUG [regionserver/c7c455b68129:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog c7c455b68129%2C43699%2C1733577893481.meta:.meta(num 1733577894498) roll requested 2024-12-07T13:25:33,480 INFO [regionserver/c7c455b68129:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor c7c455b68129%2C43699%2C1733577893481.meta.1733577933480.meta 2024-12-07T13:25:33,480 WARN [IPC Server handler 2 on default port 35785 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,42863,1733577894737/c7c455b68129%2C42863%2C1733577894737.1733577894972 has not been closed. Lease recovery is in progress. 
RecoveryId = 1077 for block blk_1073741837_1013 2024-12-07T13:25:33,481 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,42863,1733577894737/c7c455b68129%2C42863%2C1733577894737.1733577894972 after 1ms 2024-12-07T13:25:33,483 WARN [Thread-1033 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741893_1078 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T13:25:33,483 WARN [Thread-1033 {}] hdfs.DataStreamer(1731): Error Recovery for BP-959501690-172.17.0.3-1733577891118:blk_1073741893_1078 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41655,DS-ce9ff6f4-6fc6-4a51-89c6-edb968514e60,DISK], DatanodeInfoWithStorage[127.0.0.1:39707,DS-bc541ee7-02ba-4fbd-8aa8-6b25f00a10bd,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41655,DS-ce9ff6f4-6fc6-4a51-89c6-edb968514e60,DISK]) is bad. 2024-12-07T13:25:33,483 WARN [Thread-1033 {}] hdfs.DataStreamer(1850): Abandoning BP-959501690-172.17.0.3-1733577891118:blk_1073741893_1078 2024-12-07T13:25:33,484 WARN [Thread-1033 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41655,DS-ce9ff6f4-6fc6-4a51-89c6-edb968514e60,DISK] 2024-12-07T13:25:33,484 DEBUG [RS_CLOSE_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e6098b8ab71fc33e054cc7633f0ee629/.tmp/info/2a4ee7f545dd479a99c9d63ff60bb5c6 is 1080, key is row0016/info:/1733577933195/Put/seqid=0 2024-12-07T13:25:33,486 WARN [Thread-1034 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741895_1080 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T13:25:33,486 WARN [Thread-1034 {}] hdfs.DataStreamer(1731): Error Recovery for BP-959501690-172.17.0.3-1733577891118:blk_1073741895_1080 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41655,DS-ce9ff6f4-6fc6-4a51-89c6-edb968514e60,DISK], DatanodeInfoWithStorage[127.0.0.1:39707,DS-bc541ee7-02ba-4fbd-8aa8-6b25f00a10bd,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41655,DS-ce9ff6f4-6fc6-4a51-89c6-edb968514e60,DISK]) is bad. 2024-12-07T13:25:33,486 WARN [Thread-1034 {}] hdfs.DataStreamer(1850): Abandoning BP-959501690-172.17.0.3-1733577891118:blk_1073741895_1080 2024-12-07T13:25:33,487 WARN [Thread-1034 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41655,DS-ce9ff6f4-6fc6-4a51-89c6-edb968514e60,DISK] 2024-12-07T13:25:33,487 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:25:33,487 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:25:33,487 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:25:33,487 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:25:33,487 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:25:33,487 INFO [regionserver/c7c455b68129:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,43699,1733577893481/c7c455b68129%2C43699%2C1733577893481.meta.1733577894498.meta with entries=8, filesize=2.33 KB; new WAL /user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,43699,1733577893481/c7c455b68129%2C43699%2C1733577893481.meta.1733577933480.meta 2024-12-07T13:25:33,487 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41375,DS-5b80aff9-5093-47e6-b68d-7fda9ccb5d7b,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T13:25:33,488 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41375,DS-5b80aff9-5093-47e6-b68d-7fda9ccb5d7b,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-07T13:25:33,488 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,43699,1733577893481/c7c455b68129%2C43699%2C1733577893481.meta.1733577894498.meta 2024-12-07T13:25:33,488 WARN [IPC Server handler 0 on default port 35785 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,43699,1733577893481/c7c455b68129%2C43699%2C1733577893481.meta.1733577894498.meta has not been closed. Lease recovery is in progress. RecoveryId = 1082 for block blk_1073741834_1010 2024-12-07T13:25:33,488 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,43699,1733577893481/c7c455b68129%2C43699%2C1733577893481.meta.1733577894498.meta after 0ms 2024-12-07T13:25:33,492 DEBUG [regionserver/c7c455b68129:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34539:34539),(127.0.0.1/127.0.0.1:39547:39547)] 2024-12-07T13:25:33,492 DEBUG [regionserver/c7c455b68129:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,43699,1733577893481/c7c455b68129%2C43699%2C1733577893481.meta.1733577894498.meta is not closed yet, will try archiving it next time 2024-12-07T13:25:33,495 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39707 is added to blk_1073741896_1081 (size=13583) 2024-12-07T13:25:33,495 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43971 is added to blk_1073741896_1081 (size=13583) 2024-12-07T13:25:33,495 INFO [RS_CLOSE_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.41 KB at sequenceid=77 (bloomFilter=true), to=hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e6098b8ab71fc33e054cc7633f0ee629/.tmp/info/2a4ee7f545dd479a99c9d63ff60bb5c6 2024-12-07T13:25:33,501 DEBUG [RS_CLOSE_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e6098b8ab71fc33e054cc7633f0ee629/.tmp/info/2a4ee7f545dd479a99c9d63ff60bb5c6 as hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e6098b8ab71fc33e054cc7633f0ee629/info/2a4ee7f545dd479a99c9d63ff60bb5c6 2024-12-07T13:25:33,507 INFO [RS_CLOSE_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e6098b8ab71fc33e054cc7633f0ee629/info/2a4ee7f545dd479a99c9d63ff60bb5c6, entries=8, sequenceid=77, filesize=13.3 K 2024-12-07T13:25:33,508 INFO [RS_CLOSE_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~8.41 KB/8607, heapSize ~9.23 KB/9456, 
currentSize=0 B/0 for e6098b8ab71fc33e054cc7633f0ee629 in 29ms, sequenceid=77, compaction requested=true 2024-12-07T13:25:33,508 DEBUG [RS_CLOSE_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/data/hbase/meta/1588230740/.tmp/info/86c5f000ec6047dd9cf4aea71584e492 is 203, key is TestLogRolling-testLogRollOnDatanodeDeath,,1733577894867.e6098b8ab71fc33e054cc7633f0ee629./info:regioninfo/1733577895245/Put/seqid=0 2024-12-07T13:25:33,508 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1733577894867.e6098b8ab71fc33e054cc7633f0ee629.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e6098b8ab71fc33e054cc7633f0ee629/info/216c4fe911b3477683ae13e40245e3e4, hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e6098b8ab71fc33e054cc7633f0ee629/info/b816850bb0ac49f7899c1430c40859eb, hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e6098b8ab71fc33e054cc7633f0ee629/info/108ffd11cb734e7bb248b46beaec63b5, hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e6098b8ab71fc33e054cc7633f0ee629/info/bf3faed5cadf42c6ade322c948a75d13, hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e6098b8ab71fc33e054cc7633f0ee629/info/c4af9a650b1645038290fcd41826d118, hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e6098b8ab71fc33e054cc7633f0ee629/info/57e8ee5e0d194df3bbb99467ee4bc499] to archive 2024-12-07T13:25:33,509 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1733577894867.e6098b8ab71fc33e054cc7633f0ee629.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-12-07T13:25:33,510 WARN [Thread-1045 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741897_1083 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-07T13:25:33,510 WARN [Thread-1045 {}] hdfs.DataStreamer(1731): Error Recovery for BP-959501690-172.17.0.3-1733577891118:blk_1073741897_1083 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41655,DS-ce9ff6f4-6fc6-4a51-89c6-edb968514e60,DISK], DatanodeInfoWithStorage[127.0.0.1:43971,DS-317dae2a-1dd6-4115-b7da-799a141a3dbf,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41655,DS-ce9ff6f4-6fc6-4a51-89c6-edb968514e60,DISK]) is bad. 2024-12-07T13:25:33,510 WARN [Thread-1045 {}] hdfs.DataStreamer(1850): Abandoning BP-959501690-172.17.0.3-1733577891118:blk_1073741897_1083 2024-12-07T13:25:33,510 WARN [Thread-1045 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41655,DS-ce9ff6f4-6fc6-4a51-89c6-edb968514e60,DISK] 2024-12-07T13:25:33,511 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1733577894867.e6098b8ab71fc33e054cc7633f0ee629.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e6098b8ab71fc33e054cc7633f0ee629/info/216c4fe911b3477683ae13e40245e3e4 to hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e6098b8ab71fc33e054cc7633f0ee629/info/216c4fe911b3477683ae13e40245e3e4 2024-12-07T13:25:33,512 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1733577894867.e6098b8ab71fc33e054cc7633f0ee629.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e6098b8ab71fc33e054cc7633f0ee629/info/b816850bb0ac49f7899c1430c40859eb to hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e6098b8ab71fc33e054cc7633f0ee629/info/b816850bb0ac49f7899c1430c40859eb 2024-12-07T13:25:33,513 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1733577894867.e6098b8ab71fc33e054cc7633f0ee629.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e6098b8ab71fc33e054cc7633f0ee629/info/108ffd11cb734e7bb248b46beaec63b5 to hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e6098b8ab71fc33e054cc7633f0ee629/info/108ffd11cb734e7bb248b46beaec63b5 2024-12-07T13:25:33,514 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39707 is added to blk_1073741898_1084 (size=7089) 2024-12-07T13:25:33,514 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43971 is added to blk_1073741898_1084 (size=7089) 2024-12-07T13:25:33,515 INFO [RS_CLOSE_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.50 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/data/hbase/meta/1588230740/.tmp/info/86c5f000ec6047dd9cf4aea71584e492 2024-12-07T13:25:33,515 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1733577894867.e6098b8ab71fc33e054cc7633f0ee629.-1 {}] backup.HFileArchiver(596): Archived from 
FileableStoreFile, hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e6098b8ab71fc33e054cc7633f0ee629/info/bf3faed5cadf42c6ade322c948a75d13 to hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e6098b8ab71fc33e054cc7633f0ee629/info/bf3faed5cadf42c6ade322c948a75d13 2024-12-07T13:25:33,516 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1733577894867.e6098b8ab71fc33e054cc7633f0ee629.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e6098b8ab71fc33e054cc7633f0ee629/info/c4af9a650b1645038290fcd41826d118 to hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e6098b8ab71fc33e054cc7633f0ee629/info/c4af9a650b1645038290fcd41826d118 2024-12-07T13:25:33,517 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1733577894867.e6098b8ab71fc33e054cc7633f0ee629.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e6098b8ab71fc33e054cc7633f0ee629/info/57e8ee5e0d194df3bbb99467ee4bc499 to hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e6098b8ab71fc33e054cc7633f0ee629/info/57e8ee5e0d194df3bbb99467ee4bc499 2024-12-07T13:25:33,518 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1733577894867.e6098b8ab71fc33e054cc7633f0ee629.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=c7c455b68129:46593 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] 
at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 
16 more 2024-12-07T13:25:33,518 WARN [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1733577894867.e6098b8ab71fc33e054cc7633f0ee629.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [216c4fe911b3477683ae13e40245e3e4=10347, b816850bb0ac49f7899c1430c40859eb=12506, 108ffd11cb734e7bb248b46beaec63b5=17994, bf3faed5cadf42c6ade322c948a75d13=6027, c4af9a650b1645038290fcd41826d118=6027, 57e8ee5e0d194df3bbb99467ee4bc499=6027] 2024-12-07T13:25:33,522 DEBUG [RS_CLOSE_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/data/default/TestLogRolling-testLogRollOnDatanodeDeath/e6098b8ab71fc33e054cc7633f0ee629/recovered.edits/80.seqid, newMaxSeqId=80, maxSeqId=1 2024-12-07T13:25:33,522 INFO [RS_CLOSE_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1733577894867.e6098b8ab71fc33e054cc7633f0ee629. 2024-12-07T13:25:33,523 DEBUG [RS_CLOSE_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for e6098b8ab71fc33e054cc7633f0ee629: Waiting for close lock at 1733577933478Running coprocessor pre-close hooks at 1733577933478Disabling compacts and flushes for region at 1733577933478Disabling writes for close at 1733577933479 (+1 ms)Obtaining lock to block concurrent updates at 1733577933479Preparing flush snapshotting stores in e6098b8ab71fc33e054cc7633f0ee629 at 1733577933479Finished memstore snapshotting TestLogRolling-testLogRollOnDatanodeDeath,,1733577894867.e6098b8ab71fc33e054cc7633f0ee629., syncing WAL and waiting on mvcc, flushsize=dataSize=8607, getHeapSize=9456, getOffHeapSize=0, getCellsCount=8 at 1733577933479Flushing stores of TestLogRolling-testLogRollOnDatanodeDeath,,1733577894867.e6098b8ab71fc33e054cc7633f0ee629. at 1733577933480 (+1 ms)Flushing e6098b8ab71fc33e054cc7633f0ee629/info: creating writer at 1733577933481 (+1 ms)Flushing e6098b8ab71fc33e054cc7633f0ee629/info: appending metadata at 1733577933484 (+3 ms)Flushing e6098b8ab71fc33e054cc7633f0ee629/info: closing flushed file at 1733577933484Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3f0cadd4: reopening flushed file at 1733577933500 (+16 ms)Finished flush of dataSize ~8.41 KB/8607, heapSize ~9.23 KB/9456, currentSize=0 B/0 for e6098b8ab71fc33e054cc7633f0ee629 in 29ms, sequenceid=77, compaction requested=true at 1733577933508 (+8 ms)Writing region close event to WAL at 1733577933518 (+10 ms)Running coprocessor post-close hooks at 1733577933522 (+4 ms)Closed at 1733577933522 2024-12-07T13:25:33,523 DEBUG [RS_CLOSE_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1733577894867.e6098b8ab71fc33e054cc7633f0ee629. 
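[editor's note] The close journal above records a memstore flush (~8.41 KB at sequenceid=77) performed while closing region e6098b8ab71fc33e054cc7633f0ee629. For orientation only, the same flush path can be exercised on demand through the public Admin API; this is a generic sketch, not something the test itself does.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class ManualFlushExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      // Ask the hosting region server(s) to flush the table's memstores to HFiles.
      admin.flush(TableName.valueOf("TestLogRolling-testLogRollOnDatanodeDeath"));
    }
  }
}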
2024-12-07T13:25:33,533 DEBUG [RS_CLOSE_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/data/hbase/meta/1588230740/.tmp/ns/116f1cb77eb3417d8a01d2c35201b66b is 43, key is default/ns:d/1733577894674/Put/seqid=0 2024-12-07T13:25:33,536 WARN [Thread-1052 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741899_1085 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:41655 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T13:25:33,536 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1283404152_22 at /127.0.0.1:41408 [Receiving block BP-959501690-172.17.0.3-1733577891118:blk_1073741899_1085] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3a4fca0d-a22f-07a3-d286-965b16cc613d/cluster_3b40d539-567c-d4e1-b985-613e875fc041/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3a4fca0d-a22f-07a3-d286-965b16cc613d/cluster_3b40d539-567c-d4e1-b985-613e875fc041/data/data6]'}, localName='127.0.0.1:43971', datanodeUuid='75be824e-cc7b-46b3-a1bc-5cae5177bdc9', xmitsInProgress=0}:Exception transferring block BP-959501690-172.17.0.3-1733577891118:blk_1073741899_1085 to mirror 127.0.0.1:41655 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-07T13:25:33,536 WARN [Thread-1052 {}] hdfs.DataStreamer(1731): Error Recovery for BP-959501690-172.17.0.3-1733577891118:blk_1073741899_1085 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43971,DS-317dae2a-1dd6-4115-b7da-799a141a3dbf,DISK], DatanodeInfoWithStorage[127.0.0.1:41655,DS-ce9ff6f4-6fc6-4a51-89c6-edb968514e60,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:41655,DS-ce9ff6f4-6fc6-4a51-89c6-edb968514e60,DISK]) is bad. 2024-12-07T13:25:33,536 WARN [Thread-1052 {}] hdfs.DataStreamer(1850): Abandoning BP-959501690-172.17.0.3-1733577891118:blk_1073741899_1085 2024-12-07T13:25:33,536 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1283404152_22 at /127.0.0.1:41408 [Receiving block BP-959501690-172.17.0.3-1733577891118:blk_1073741899_1085] {}] datanode.BlockReceiver(316): Block 1073741899 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-12-07T13:25:33,536 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1283404152_22 at /127.0.0.1:41408 [Receiving block BP-959501690-172.17.0.3-1733577891118:blk_1073741899_1085] {}] datanode.DataXceiver(331): 127.0.0.1:43971:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41408 dst: /127.0.0.1:43971 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-07T13:25:33,537 WARN [Thread-1052 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41655,DS-ce9ff6f4-6fc6-4a51-89c6-edb968514e60,DISK] 2024-12-07T13:25:33,540 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43971 is added to blk_1073741900_1086 (size=5153) 2024-12-07T13:25:33,540 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39707 is added to blk_1073741900_1086 (size=5153) 2024-12-07T13:25:33,541 INFO [RS_CLOSE_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/data/hbase/meta/1588230740/.tmp/ns/116f1cb77eb3417d8a01d2c35201b66b 2024-12-07T13:25:33,559 DEBUG [RS_CLOSE_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/data/hbase/meta/1588230740/.tmp/table/058470a0d7f84ca8a6baba6fbca0386e is 77, key is TestLogRolling-testLogRollOnDatanodeDeath/table:state/1733577895260/Put/seqid=0 2024-12-07T13:25:33,561 WARN [Thread-1059 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741901_1087 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T13:25:33,561 WARN [Thread-1059 {}] hdfs.DataStreamer(1731): Error Recovery for BP-959501690-172.17.0.3-1733577891118:blk_1073741901_1087 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41655,DS-ce9ff6f4-6fc6-4a51-89c6-edb968514e60,DISK], DatanodeInfoWithStorage[127.0.0.1:39707,DS-bc541ee7-02ba-4fbd-8aa8-6b25f00a10bd,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41655,DS-ce9ff6f4-6fc6-4a51-89c6-edb968514e60,DISK]) is bad. 
2024-12-07T13:25:33,561 WARN [Thread-1059 {}] hdfs.DataStreamer(1850): Abandoning BP-959501690-172.17.0.3-1733577891118:blk_1073741901_1087 2024-12-07T13:25:33,562 WARN [Thread-1059 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41655,DS-ce9ff6f4-6fc6-4a51-89c6-edb968514e60,DISK] 2024-12-07T13:25:33,567 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43971 is added to blk_1073741902_1088 (size=5424) 2024-12-07T13:25:33,567 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39707 is added to blk_1073741902_1088 (size=5424) 2024-12-07T13:25:33,567 INFO [RS_CLOSE_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=146 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/data/hbase/meta/1588230740/.tmp/table/058470a0d7f84ca8a6baba6fbca0386e 2024-12-07T13:25:33,573 DEBUG [RS_CLOSE_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/data/hbase/meta/1588230740/.tmp/info/86c5f000ec6047dd9cf4aea71584e492 as hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/data/hbase/meta/1588230740/info/86c5f000ec6047dd9cf4aea71584e492 2024-12-07T13:25:33,579 INFO [RS_CLOSE_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/data/hbase/meta/1588230740/info/86c5f000ec6047dd9cf4aea71584e492, entries=10, sequenceid=11, filesize=6.9 K 2024-12-07T13:25:33,580 DEBUG [RS_CLOSE_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/data/hbase/meta/1588230740/.tmp/ns/116f1cb77eb3417d8a01d2c35201b66b as hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/data/hbase/meta/1588230740/ns/116f1cb77eb3417d8a01d2c35201b66b 2024-12-07T13:25:33,586 INFO [RS_CLOSE_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/data/hbase/meta/1588230740/ns/116f1cb77eb3417d8a01d2c35201b66b, entries=2, sequenceid=11, filesize=5.0 K 2024-12-07T13:25:33,587 DEBUG [RS_CLOSE_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/data/hbase/meta/1588230740/.tmp/table/058470a0d7f84ca8a6baba6fbca0386e as hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/data/hbase/meta/1588230740/table/058470a0d7f84ca8a6baba6fbca0386e 2024-12-07T13:25:33,588 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,43699,1733577893481/c7c455b68129%2C43699%2C1733577893481.1733577917324 to hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/oldWALs/c7c455b68129%2C43699%2C1733577893481.1733577917324 2024-12-07T13:25:33,593 INFO 
[RS_CLOSE_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/data/hbase/meta/1588230740/table/058470a0d7f84ca8a6baba6fbca0386e, entries=2, sequenceid=11, filesize=5.3 K 2024-12-07T13:25:33,594 INFO [RS_CLOSE_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.71 KB/1752, heapSize ~3.45 KB/3536, currentSize=0 B/0 for 1588230740 in 115ms, sequenceid=11, compaction requested=false 2024-12-07T13:25:33,599 DEBUG [RS_CLOSE_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-12-07T13:25:33,599 DEBUG [RS_CLOSE_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-07T13:25:33,599 INFO [RS_CLOSE_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-07T13:25:33,600 DEBUG [RS_CLOSE_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733577933479Running coprocessor pre-close hooks at 1733577933479Disabling compacts and flushes for region at 1733577933479Disabling writes for close at 1733577933479Obtaining lock to block concurrent updates at 1733577933479Preparing flush snapshotting stores in 1588230740 at 1733577933479Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1752, getHeapSize=3776, getOffHeapSize=0, getCellsCount=14 at 1733577933480 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1733577933492 (+12 ms)Flushing 1588230740/info: creating writer at 1733577933492Flushing 1588230740/info: appending metadata at 1733577933508 (+16 ms)Flushing 1588230740/info: closing flushed file at 1733577933508Flushing 1588230740/ns: creating writer at 1733577933521 (+13 ms)Flushing 1588230740/ns: appending metadata at 1733577933533 (+12 ms)Flushing 1588230740/ns: closing flushed file at 1733577933533Flushing 1588230740/table: creating writer at 1733577933546 (+13 ms)Flushing 1588230740/table: appending metadata at 1733577933559 (+13 ms)Flushing 1588230740/table: closing flushed file at 1733577933559Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@449b1eff: reopening flushed file at 1733577933572 (+13 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@13bce42b: reopening flushed file at 1733577933579 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@34fa524a: reopening flushed file at 1733577933586 (+7 ms)Finished flush of dataSize ~1.71 KB/1752, heapSize ~3.45 KB/3536, currentSize=0 B/0 for 1588230740 in 115ms, sequenceid=11, compaction requested=false at 1733577933594 (+8 ms)Writing region close event to WAL at 1733577933595 (+1 ms)Running coprocessor post-close hooks at 1733577933599 (+4 ms)Closed at 1733577933599 2024-12-07T13:25:33,600 DEBUG [RS_CLOSE_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-07T13:25:33,679 INFO [RS:0;c7c455b68129:43699 {}] 
regionserver.HRegionServer(976): stopping server c7c455b68129,43699,1733577893481; all regions closed. 2024-12-07T13:25:33,680 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:25:33,680 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:25:33,680 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:25:33,680 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:25:33,680 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:25:33,682 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39707 is added to blk_1073741894_1079 (size=825) 2024-12-07T13:25:33,682 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43971 is added to blk_1073741894_1079 (size=825) 2024-12-07T13:25:33,851 INFO [regionserver/c7c455b68129:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-12-07T13:25:33,852 INFO [regionserver/c7c455b68129:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-12-07T13:25:33,982 INFO [regionserver/c7c455b68129:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-07T13:25:34,055 INFO [regionserver/c7c455b68129:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-12-07T13:25:34,055 INFO [regionserver/c7c455b68129:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-12-07T13:25:34,691 INFO [master/c7c455b68129:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-12-07T13:25:34,691 INFO [master/c7c455b68129:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-12-07T13:25:34,841 INFO [regionserver/c7c455b68129:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-07T13:25:34,979 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@1aac741e[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:39707, datanodeUuid=bafc61d8-ce26-4093-91ed-a19abb4f3d38, infoPort=39547, infoSecurePort=0, ipcPort=39125, storageInfo=lv=-57;cid=testClusterID;nsid=2132482547;c=1733577891118):Failed to transfer BP-959501690-172.17.0.3-1733577891118:blk_1073741831_1007 to 127.0.0.1:41655 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
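[editor's note] The "Failed to transfer ... to 127.0.0.1:41655 got java.net.ConnectException" warnings above are the surviving datanodes trying to re-replicate blocks to the datanode this test has killed. As a hedged sketch (not part of the test), the set of datanodes the namenode still considers live can be inspected from a client:

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;

public class LiveDatanodesExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    try (FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:35785"), conf)) {
      DistributedFileSystem dfs = (DistributedFileSystem) fs;
      // Report only datanodes the namenode currently counts as live.
      for (DatanodeInfo dn : dfs.getDataNodeStats(DatanodeReportType.LIVE)) {
        System.out.println(dn.getXferAddr() + " capacity=" + dn.getCapacity());
      }
    }
  }
}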
2024-12-07T13:25:34,988 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43971 is added to blk_1073741835_1011 (size=393) 2024-12-07T13:25:35,976 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@1aac741e[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:39707, datanodeUuid=bafc61d8-ce26-4093-91ed-a19abb4f3d38, infoPort=39547, infoSecurePort=0, ipcPort=39125, storageInfo=lv=-57;cid=testClusterID;nsid=2132482547;c=1733577891118):Failed to transfer BP-959501690-172.17.0.3-1733577891118:blk_1073741827_1003 to 127.0.0.1:41655 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T13:25:35,976 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@4bb31557[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:39707, datanodeUuid=bafc61d8-ce26-4093-91ed-a19abb4f3d38, infoPort=39547, infoSecurePort=0, ipcPort=39125, storageInfo=lv=-57;cid=testClusterID;nsid=2132482547;c=1733577891118):Failed to transfer BP-959501690-172.17.0.3-1733577891118:blk_1073741829_1005 to 127.0.0.1:41655 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-07T13:25:36,981 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43971 is added to blk_1073741832_1008 (size=32) 2024-12-07T13:25:36,982 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43971 is added to blk_1073741828_1004 (size=1189) 2024-12-07T13:25:36,987 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39707 is added to blk_1073741875_1058 (size=12100) 2024-12-07T13:25:37,483 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,42863,1733577894737/c7c455b68129%2C42863%2C1733577894737.1733577894972 after 4003ms 2024-12-07T13:25:37,490 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,43699,1733577893481/c7c455b68129%2C43699%2C1733577893481.meta.1733577894498.meta after 4002ms 2024-12-07T13:25:37,983 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43971 is added to blk_1073741826_1002 (size=42) 2024-12-07T13:25:38,427 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath 2024-12-07T13:25:38,429 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-07T13:25:38,429 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-12-07T13:25:38,480 ERROR [WAL-Shutdown-0 {}] wal.AbstractFSWAL(2118): We have waited 5 seconds but the close of async writer doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.fshlog.wait.on.shutdown.seconds" 2024-12-07T13:25:38,485 DEBUG [RS:1;c7c455b68129:42863 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/oldWALs 2024-12-07T13:25:38,485 INFO [RS:1;c7c455b68129:42863 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog c7c455b68129%2C42863%2C1733577894737:(num 1733577894972) 2024-12-07T13:25:38,485 DEBUG [RS:1;c7c455b68129:42863 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T13:25:38,485 INFO [RS:1;c7c455b68129:42863 {}] regionserver.LeaseManager(133): Closed leases 2024-12-07T13:25:38,485 INFO [RS:1;c7c455b68129:42863 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-07T13:25:38,486 INFO [RS:1;c7c455b68129:42863 {}] hbase.ChoreService(370): Chore service for: regionserver/c7c455b68129:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-07T13:25:38,486 INFO [RS:1;c7c455b68129:42863 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 
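[editor's note] The RecoverLeaseFSUtils entries above ("Failed to recover lease, attempt=1 ... after 4003ms") show HBase repeatedly asking the namenode to recover the lease on the old WAL before it can be archived. Below is a minimal sketch of that recover-then-poll pattern using only the public DistributedFileSystem calls that also appear in the stack trace that follows (recoverLease and isFileClosed); the retry interval and attempt cap are illustrative, not HBase's actual RecoverLeaseFSUtils logic.

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class WalLeaseRecoveryExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    DistributedFileSystem dfs = (DistributedFileSystem)
        FileSystem.get(URI.create("hdfs://localhost:35785"), conf);
    Path wal = new Path(args[0]); // WAL file whose lease should be recovered
    // Ask the namenode to start lease recovery, then poll until the file is closed.
    boolean recovered = dfs.recoverLease(wal);
    int attempt = 0;
    while (!recovered && attempt < 10) {
      Thread.sleep(4000L); // illustrative backoff, roughly matching the log's ~4s retries
      recovered = dfs.isFileClosed(wal);
      attempt++;
    }
    System.out.println("lease recovered=" + recovered + " after attempt=" + attempt);
    dfs.close();
  }
}

Separately, the WAL-Shutdown-0 error above names hbase.wal.fshlog.wait.on.shutdown.seconds; per the message itself, raising that value gives the writer more time to close before shutdown gives up on it.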
2024-12-07T13:25:38,486 INFO [regionserver/c7c455b68129:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-07T13:25:38,486 INFO [RS:1;c7c455b68129:42863 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-07T13:25:38,486 INFO [RS:1;c7c455b68129:42863 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-07T13:25:38,486 INFO [RS:1;c7c455b68129:42863 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-07T13:25:38,487 INFO [RS:1;c7c455b68129:42863 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:42863 2024-12-07T13:25:38,491 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,42863,1733577894737/c7c455b68129%2C42863%2C1733577894737.1733577894972 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.FileNotFoundException: File does not exist: /user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,42863,1733577894737/c7c455b68129%2C42863%2C1733577894737.1733577894972 at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:87) at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:77) at org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.isFileClosed(FSDirStatAndListingOp.java:124) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.isFileClosed(FSNamesystem.java:3502) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.isFileClosed(NameNodeRpcServer.java:1248) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.isFileClosed(ClientNamenodeProtocolServerSideTranslatorPB.java:1419) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.ipc.RemoteException.instantiateException(RemoteException.java:121) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.RemoteException.unwrapRemoteException(RemoteException.java:88) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1812) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more Caused by: org.apache.hadoop.ipc.RemoteException: File does not exist: /user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,42863,1733577894737/c7c455b68129%2C42863%2C1733577894737.1733577894972 at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:87) at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:77) at org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.isFileClosed(FSDirStatAndListingOp.java:124) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.isFileClosed(FSNamesystem.java:3502) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.isFileClosed(NameNodeRpcServer.java:1248) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.isFileClosed(ClientNamenodeProtocolServerSideTranslatorPB.java:1419) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.isFileClosed(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$isFileClosed$57(ClientNamenodeProtocolTranslatorPB.java:999) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.isFileClosed(ClientNamenodeProtocolTranslatorPB.java:999) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy44.isFileClosed(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.isFileClosed(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.isFileClosed(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1810) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:25:38,524 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T13:25:38,542 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T13:25:38,542 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T13:25:38,542 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T13:25:38,542 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T13:25:38,543 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T13:25:38,556 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T13:25:38,556 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T13:25:38,601 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42863-0x10000736c570002, quorum=127.0.0.1:55405, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/c7c455b68129,42863,1733577894737 2024-12-07T13:25:38,601 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46593-0x10000736c570000, quorum=127.0.0.1:55405, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-07T13:25:38,601 INFO [RS:1;c7c455b68129:42863 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-07T13:25:38,625 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [c7c455b68129,42863,1733577894737] 2024-12-07T13:25:38,635 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/c7c455b68129,42863,1733577894737 already deleted, retry=false 2024-12-07T13:25:38,636 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; c7c455b68129,42863,1733577894737 expired; onlineServers=1 2024-12-07T13:25:38,681 ERROR [WAL-Shutdown-0 {}] wal.AbstractFSWAL(2118): We have waited 5 seconds but the close of async writer doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.fshlog.wait.on.shutdown.seconds" 2024-12-07T13:25:38,687 DEBUG [RS:0;c7c455b68129:43699 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/oldWALs 2024-12-07T13:25:38,687 INFO [RS:0;c7c455b68129:43699 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog c7c455b68129%2C43699%2C1733577893481.meta:.meta(num 1733577933480) 2024-12-07T13:25:38,688 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:25:38,688 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:25:38,688 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:25:38,689 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:25:38,689 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:25:38,691 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39707 is added to blk_1073741890_1074 (size=14682) 2024-12-07T13:25:38,691 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43971 is added to blk_1073741890_1074 (size=14682) 2024-12-07T13:25:38,694 DEBUG [RS:0;c7c455b68129:43699 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/oldWALs 2024-12-07T13:25:38,694 INFO [RS:0;c7c455b68129:43699 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog c7c455b68129%2C43699%2C1733577893481:(num 1733577933175) 2024-12-07T13:25:38,694 DEBUG [RS:0;c7c455b68129:43699 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T13:25:38,694 INFO [RS:0;c7c455b68129:43699 {}] regionserver.LeaseManager(133): Closed leases 2024-12-07T13:25:38,694 INFO [RS:0;c7c455b68129:43699 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-07T13:25:38,694 INFO [RS:0;c7c455b68129:43699 {}] hbase.ChoreService(370): Chore service for: regionserver/c7c455b68129:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore 
name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-07T13:25:38,694 INFO [RS:0;c7c455b68129:43699 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-07T13:25:38,694 INFO [regionserver/c7c455b68129:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-07T13:25:38,695 INFO [RS:0;c7c455b68129:43699 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:43699 2024-12-07T13:25:38,703 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46593-0x10000736c570000, quorum=127.0.0.1:55405, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-07T13:25:38,703 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43699-0x10000736c570001, quorum=127.0.0.1:55405, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/c7c455b68129,43699,1733577893481 2024-12-07T13:25:38,703 INFO [RS:0;c7c455b68129:43699 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-07T13:25:38,714 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [c7c455b68129,43699,1733577893481] 2024-12-07T13:25:38,725 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/c7c455b68129,43699,1733577893481 already deleted, retry=false 2024-12-07T13:25:38,725 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; c7c455b68129,43699,1733577893481 expired; onlineServers=0 2024-12-07T13:25:38,725 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'c7c455b68129,46593,1733577893287' ***** 2024-12-07T13:25:38,725 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-07T13:25:38,725 INFO [M:0;c7c455b68129:46593 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-07T13:25:38,725 INFO [M:0;c7c455b68129:46593 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-07T13:25:38,725 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42863-0x10000736c570002, quorum=127.0.0.1:55405, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-07T13:25:38,725 INFO [RS:1;c7c455b68129:42863 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-07T13:25:38,726 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42863-0x10000736c570002, quorum=127.0.0.1:55405, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-07T13:25:38,726 INFO [RS:1;c7c455b68129:42863 {}] regionserver.HRegionServer(1031): Exiting; stopping=c7c455b68129,42863,1733577894737; zookeeper connection closed. 2024-12-07T13:25:38,726 DEBUG [M:0;c7c455b68129:46593 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-07T13:25:38,726 DEBUG [M:0;c7c455b68129:46593 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-07T13:25:38,726 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
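[editor's note] The ZKWatcher entries above show the master learning of each region server's shutdown via a NodeDeleted event on the server's ephemeral znode under /hbase/rs. A minimal, generic watcher sketch with the plain ZooKeeper client follows; the connect string, session timeout, and znode path are taken from the log for illustration only.

import java.util.concurrent.CountDownLatch;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

public class RsEphemeralNodeWatchExample {
  public static void main(String[] args) throws Exception {
    CountDownLatch deleted = new CountDownLatch(1);
    ZooKeeper zk = new ZooKeeper("127.0.0.1:55405", 30000, event -> {
      // Fired when the watched region-server znode disappears (ephemeral node deleted/expired).
      if (event.getType() == Watcher.Event.EventType.NodeDeleted) {
        System.out.println("znode deleted: " + event.getPath());
        deleted.countDown();
      }
    });
    // One-shot watch on an example region-server znode.
    zk.exists("/hbase/rs/c7c455b68129,43699,1733577893481", true);
    deleted.await();
    zk.close();
  }
}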
2024-12-07T13:25:38,726 DEBUG [master/c7c455b68129:0:becomeActiveMaster-HFileCleaner.small.0-1733577893856 {}] cleaner.HFileCleaner(306): Exit Thread[master/c7c455b68129:0:becomeActiveMaster-HFileCleaner.small.0-1733577893856,5,FailOnTimeoutGroup] 2024-12-07T13:25:38,726 DEBUG [master/c7c455b68129:0:becomeActiveMaster-HFileCleaner.large.0-1733577893856 {}] cleaner.HFileCleaner(306): Exit Thread[master/c7c455b68129:0:becomeActiveMaster-HFileCleaner.large.0-1733577893856,5,FailOnTimeoutGroup] 2024-12-07T13:25:38,726 INFO [M:0;c7c455b68129:46593 {}] hbase.ChoreService(370): Chore service for: master/c7c455b68129:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-12-07T13:25:38,726 INFO [M:0;c7c455b68129:46593 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-07T13:25:38,727 DEBUG [M:0;c7c455b68129:46593 {}] master.HMaster(1795): Stopping service threads 2024-12-07T13:25:38,727 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@1d687df3 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@1d687df3 2024-12-07T13:25:38,727 INFO [M:0;c7c455b68129:46593 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-07T13:25:38,727 INFO [M:0;c7c455b68129:46593 {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-07T13:25:38,727 INFO [M:0;c7c455b68129:46593 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-07T13:25:38,728 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-07T13:25:38,735 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46593-0x10000736c570000, quorum=127.0.0.1:55405, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-07T13:25:38,735 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46593-0x10000736c570000, quorum=127.0.0.1:55405, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T13:25:38,735 DEBUG [M:0;c7c455b68129:46593 {}] zookeeper.ZKUtil(347): master:46593-0x10000736c570000, quorum=127.0.0.1:55405, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-07T13:25:38,735 WARN [M:0;c7c455b68129:46593 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-07T13:25:38,736 INFO [M:0;c7c455b68129:46593 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/.lastflushedseqids 2024-12-07T13:25:38,746 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39707 is added to blk_1073741903_1089 (size=130) 2024-12-07T13:25:38,746 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43971 is added to blk_1073741903_1089 (size=130) 2024-12-07T13:25:38,746 INFO [M:0;c7c455b68129:46593 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-12-07T13:25:38,746 INFO [M:0;c7c455b68129:46593 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 
2024-12-07T13:25:38,747 DEBUG [M:0;c7c455b68129:46593 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-07T13:25:38,747 INFO [M:0;c7c455b68129:46593 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-07T13:25:38,747 DEBUG [M:0;c7c455b68129:46593 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-07T13:25:38,747 DEBUG [M:0;c7c455b68129:46593 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-07T13:25:38,747 DEBUG [M:0;c7c455b68129:46593 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-07T13:25:38,747 INFO [M:0;c7c455b68129:46593 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.24 KB heapSize=29.47 KB 2024-12-07T13:25:38,762 DEBUG [M:0;c7c455b68129:46593 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/553b333ea70c40cdb472eda49c8ce9b4 is 82, key is hbase:meta,,1/info:regioninfo/1733577894529/Put/seqid=0 2024-12-07T13:25:38,767 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43971 is added to blk_1073741904_1090 (size=5672) 2024-12-07T13:25:38,767 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39707 is added to blk_1073741904_1090 (size=5672) 2024-12-07T13:25:38,768 INFO [M:0;c7c455b68129:46593 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/553b333ea70c40cdb472eda49c8ce9b4 2024-12-07T13:25:38,785 DEBUG [M:0;c7c455b68129:46593 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/9ba1eb0afae04442b2865fcb04cfe475 is 773, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1733577895265/Put/seqid=0 2024-12-07T13:25:38,790 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39707 is added to blk_1073741905_1091 (size=6254) 2024-12-07T13:25:38,790 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43971 is added to blk_1073741905_1091 (size=6254) 2024-12-07T13:25:38,790 INFO [M:0;c7c455b68129:46593 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.57 KB at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/9ba1eb0afae04442b2865fcb04cfe475 2024-12-07T13:25:38,795 INFO [M:0;c7c455b68129:46593 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 9ba1eb0afae04442b2865fcb04cfe475 2024-12-07T13:25:38,807 DEBUG [M:0;c7c455b68129:46593 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/d86c954bd680412bbd2a612ecb73bcf0 is 69, key is c7c455b68129,42863,1733577894737/rs:state/1733577894815/Put/seqid=0 2024-12-07T13:25:38,812 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43971 is added to blk_1073741906_1092 (size=5224) 2024-12-07T13:25:38,812 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39707 is added to blk_1073741906_1092 (size=5224) 2024-12-07T13:25:38,813 INFO [M:0;c7c455b68129:46593 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=130 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/d86c954bd680412bbd2a612ecb73bcf0 2024-12-07T13:25:38,814 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43699-0x10000736c570001, quorum=127.0.0.1:55405, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-07T13:25:38,814 INFO [RS:0;c7c455b68129:43699 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-07T13:25:38,814 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43699-0x10000736c570001, quorum=127.0.0.1:55405, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-07T13:25:38,814 INFO [RS:0;c7c455b68129:43699 {}] regionserver.HRegionServer(1031): Exiting; stopping=c7c455b68129,43699,1733577893481; zookeeper connection closed. 2024-12-07T13:25:38,814 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@f5f9186 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@f5f9186 2024-12-07T13:25:38,815 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 2 regionserver(s) complete 2024-12-07T13:25:38,837 DEBUG [M:0;c7c455b68129:46593 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/d805ba20db1544f2a0524cb24a3e6442 is 52, key is load_balancer_on/state:d/1733577894720/Put/seqid=0 2024-12-07T13:25:38,842 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43971 is added to blk_1073741907_1093 (size=5056) 2024-12-07T13:25:38,842 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39707 is added to blk_1073741907_1093 (size=5056) 2024-12-07T13:25:38,842 INFO [M:0;c7c455b68129:46593 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/d805ba20db1544f2a0524cb24a3e6442 2024-12-07T13:25:38,847 DEBUG [M:0;c7c455b68129:46593 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/553b333ea70c40cdb472eda49c8ce9b4 as 
hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/553b333ea70c40cdb472eda49c8ce9b4 2024-12-07T13:25:38,852 INFO [M:0;c7c455b68129:46593 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/553b333ea70c40cdb472eda49c8ce9b4, entries=8, sequenceid=60, filesize=5.5 K 2024-12-07T13:25:38,854 DEBUG [M:0;c7c455b68129:46593 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/9ba1eb0afae04442b2865fcb04cfe475 as hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/9ba1eb0afae04442b2865fcb04cfe475 2024-12-07T13:25:38,859 INFO [M:0;c7c455b68129:46593 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 9ba1eb0afae04442b2865fcb04cfe475 2024-12-07T13:25:38,859 INFO [M:0;c7c455b68129:46593 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/9ba1eb0afae04442b2865fcb04cfe475, entries=6, sequenceid=60, filesize=6.1 K 2024-12-07T13:25:38,860 DEBUG [M:0;c7c455b68129:46593 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/d86c954bd680412bbd2a612ecb73bcf0 as hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/d86c954bd680412bbd2a612ecb73bcf0 2024-12-07T13:25:38,865 INFO [M:0;c7c455b68129:46593 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/d86c954bd680412bbd2a612ecb73bcf0, entries=2, sequenceid=60, filesize=5.1 K 2024-12-07T13:25:38,866 DEBUG [M:0;c7c455b68129:46593 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/d805ba20db1544f2a0524cb24a3e6442 as hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/d805ba20db1544f2a0524cb24a3e6442 2024-12-07T13:25:38,871 INFO [M:0;c7c455b68129:46593 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/d805ba20db1544f2a0524cb24a3e6442, entries=1, sequenceid=60, filesize=4.9 K 2024-12-07T13:25:38,872 INFO [M:0;c7c455b68129:46593 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.24 KB/23793, heapSize ~29.41 KB/30112, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 125ms, sequenceid=60, compaction requested=false 2024-12-07T13:25:38,873 INFO [M:0;c7c455b68129:46593 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-07T13:25:38,873 DEBUG [M:0;c7c455b68129:46593 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733577938746Disabling compacts and flushes for region at 1733577938746Disabling writes for close at 1733577938747 (+1 ms)Obtaining lock to block concurrent updates at 1733577938747Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733577938747Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23793, getHeapSize=30112, getOffHeapSize=0, getCellsCount=71 at 1733577938747Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1733577938748 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733577938748Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733577938762 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733577938762Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733577938773 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733577938784 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733577938784Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733577938795 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733577938807 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733577938807Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1733577938818 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1733577938836 (+18 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1733577938837 (+1 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@64ed81f: reopening flushed file at 1733577938846 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1d214b0a: reopening flushed file at 1733577938853 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4577564d: reopening flushed file at 1733577938859 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@325e41bf: reopening flushed file at 1733577938865 (+6 ms)Finished flush of dataSize ~23.24 KB/23793, heapSize ~29.41 KB/30112, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 125ms, sequenceid=60, compaction requested=false at 1733577938872 (+7 ms)Writing region close event to WAL at 1733577938873 (+1 ms)Closed at 1733577938873 2024-12-07T13:25:38,873 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:25:38,873 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:25:38,873 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:25:38,874 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:25:38,874 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:25:38,875 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43971 is added to blk_1073741889_1072 (size=1045) 2024-12-07T13:25:38,876 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39707 is added to blk_1073741889_1072 (size=1045) 2024-12-07T13:25:39,058 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-07T13:25:39,075 WARN 
[HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T13:25:39,076 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T13:25:39,076 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T13:25:39,076 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T13:25:39,076 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T13:25:39,079 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T13:25:39,079 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T13:25:39,080 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T13:25:39,491 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,43699,1733577893481/c7c455b68129%2C43699%2C1733577893481.meta.1733577894498.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:25:39,492 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,42863,1733577894737/c7c455b68129%2C42863%2C1733577894737.1733577894972 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:25:39,994 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@4c97175 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-959501690-172.17.0.3-1733577891118:blk_1073741830_1006, datanode=DatanodeInfoWithStorage[127.0.0.1:41375,null,null]) java.net.ConnectException: Call From c7c455b68129/172.17.0.3 to localhost:33741 failed on connection exception: java.net.ConnectException: Connection refused; For more details see: http://wiki.apache.org/hadoop/ConnectionRefused at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] 
at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:876) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:668) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more 2024-12-07T13:25:40,493 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,43699,1733577893481/c7c455b68129%2C43699%2C1733577893481.meta.1733577894498.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:25:40,494 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,42863,1733577894737/c7c455b68129%2C42863%2C1733577894737.1733577894972 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:25:40,894 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/MasterData/WALs/c7c455b68129,46593,1733577893287/c7c455b68129%2C46593%2C1733577893287.1733577893614 to hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/MasterData/oldWALs/c7c455b68129%2C46593%2C1733577893287.1733577893614 2024-12-07T13:25:40,902 INFO [WAL-Archive-0 {}] region.MasterRegionUtils(50): Moved hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/MasterData/oldWALs/c7c455b68129%2C46593%2C1733577893287.1733577893614 to hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/oldWALs/c7c455b68129%2C46593%2C1733577893287.1733577893614$masterlocalwal$ 2024-12-07T13:25:40,902 INFO [M:0;c7c455b68129:46593 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-12-07T13:25:40,902 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-07T13:25:40,903 INFO [M:0;c7c455b68129:46593 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:46593 2024-12-07T13:25:40,903 INFO [M:0;c7c455b68129:46593 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-07T13:25:41,056 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46593-0x10000736c570000, quorum=127.0.0.1:55405, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-07T13:25:41,056 INFO [M:0;c7c455b68129:46593 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-07T13:25:41,056 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46593-0x10000736c570000, quorum=127.0.0.1:55405, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-07T13:25:41,095 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@34e71a9{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-07T13:25:41,096 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3f0b77b3{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-07T13:25:41,096 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-07T13:25:41,096 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@16711f95{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-07T13:25:41,096 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@366bb257{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3a4fca0d-a22f-07a3-d286-965b16cc613d/hadoop.log.dir/,STOPPED} 
2024-12-07T13:25:41,098 WARN [BP-959501690-172.17.0.3-1733577891118 heartbeating to localhost/127.0.0.1:35785 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-07T13:25:41,098 WARN [BP-959501690-172.17.0.3-1733577891118 heartbeating to localhost/127.0.0.1:35785 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-959501690-172.17.0.3-1733577891118 (Datanode Uuid bafc61d8-ce26-4093-91ed-a19abb4f3d38) service to localhost/127.0.0.1:35785 2024-12-07T13:25:41,098 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@7c13b1ce {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-959501690-172.17.0.3-1733577891118:blk_1073741837_1013, datanode=DatanodeInfoWithStorage[127.0.0.1:41375,null,null]) java.io.InterruptedIOException: DestHost:destPort localhost:33741 , LocalHost:localPort c7c455b68129/172.17.0.3:0. Failed on local exception: java.io.InterruptedIOException: Interrupted: action=RetryAction(action=RETRY, delayMillis=1000, reason=retries get failed due to exceeded maximum allowed retries number: 10), retry policy=RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1000 MILLISECONDS) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:936) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.InterruptedIOException: Interrupted: action=RetryAction(action=RETRY, delayMillis=1000, reason=retries get failed due to exceeded maximum allowed retries number: 10), retry policy=RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1000 MILLISECONDS) at org.apache.hadoop.ipc.Client$Connection.handleConnectionFailure(Client.java:963) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:691) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more Caused by: java.lang.InterruptedException: sleep interrupted at java.lang.Thread.sleep(Native Method) ~[?:?] at org.apache.hadoop.ipc.Client$Connection.handleConnectionFailure(Client.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:691) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more 2024-12-07T13:25:41,099 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@7c13b1ce {}] datanode.BlockRecoveryWorker$1(605): recover Block: RecoveringBlock{BP-959501690-172.17.0.3-1733577891118:blk_1073741837_1013; getBlockSize()=85; corrupt=false; offset=-1; locs=[DatanodeInfoWithStorage[127.0.0.1:39707,null,null], DatanodeInfoWithStorage[127.0.0.1:41375,null,null]]; cachedLocs=[]} FAILED: java.io.IOException: No block pool offer service for bpid=BP-959501690-172.17.0.3-1733577891118 2024-12-07T13:25:41,099 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@7c13b1ce {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-959501690-172.17.0.3-1733577891118:blk_1073741834_1010, datanode=DatanodeInfoWithStorage[127.0.0.1:41375,null,null]) java.io.IOException: No block pool offer service for bpid=BP-959501690-172.17.0.3-1733577891118 at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.getDatanodeID(BlockRecoveryWorker.java:539) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$000(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:131) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-07T13:25:41,100 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3a4fca0d-a22f-07a3-d286-965b16cc613d/cluster_3b40d539-567c-d4e1-b985-613e875fc041/data/data3/current/BP-959501690-172.17.0.3-1733577891118 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-07T13:25:41,100 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@7c13b1ce {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-959501690-172.17.0.3-1733577891118:blk_1073741834_1010, datanode=DatanodeInfoWithStorage[127.0.0.1:39707,null,null]) java.io.IOException: No block pool offer service for bpid=BP-959501690-172.17.0.3-1733577891118 at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.getDatanodeID(BlockRecoveryWorker.java:539) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$000(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:131) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T13:25:41,100 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@7c13b1ce {}] datanode.BlockRecoveryWorker$1(605): recover Block: RecoveringBlock{BP-959501690-172.17.0.3-1733577891118:blk_1073741834_1010; getBlockSize()=85; corrupt=false; offset=-1; locs=[DatanodeInfoWithStorage[127.0.0.1:41375,null,null], DatanodeInfoWithStorage[127.0.0.1:39707,null,null]]; cachedLocs=[]} FAILED: java.io.IOException: All datanodes failed: block=BP-959501690-172.17.0.3-1733577891118:blk_1073741834_1010, datanodeids=[DatanodeInfoWithStorage[127.0.0.1:41375,null,null], DatanodeInfoWithStorage[127.0.0.1:39707,null,null]] 2024-12-07T13:25:41,100 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-07T13:25:41,101 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-07T13:25:41,101 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3a4fca0d-a22f-07a3-d286-965b16cc613d/cluster_3b40d539-567c-d4e1-b985-613e875fc041/data/data4/current/BP-959501690-172.17.0.3-1733577891118 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-07T13:25:41,101 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-07T13:25:41,104 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@15751333{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-07T13:25:41,105 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@184b7b23{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-07T13:25:41,105 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-07T13:25:41,105 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5cac6b83{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-07T13:25:41,105 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@45d50f98{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3a4fca0d-a22f-07a3-d286-965b16cc613d/hadoop.log.dir/,STOPPED} 2024-12-07T13:25:41,106 WARN [BP-959501690-172.17.0.3-1733577891118 heartbeating to localhost/127.0.0.1:35785 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-07T13:25:41,106 WARN [BP-959501690-172.17.0.3-1733577891118 heartbeating to localhost/127.0.0.1:35785 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-959501690-172.17.0.3-1733577891118 (Datanode Uuid 75be824e-cc7b-46b3-a1bc-5cae5177bdc9) service to localhost/127.0.0.1:35785 2024-12-07T13:25:41,106 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3a4fca0d-a22f-07a3-d286-965b16cc613d/cluster_3b40d539-567c-d4e1-b985-613e875fc041/data/data5/current/BP-959501690-172.17.0.3-1733577891118 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-07T13:25:41,107 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3a4fca0d-a22f-07a3-d286-965b16cc613d/cluster_3b40d539-567c-d4e1-b985-613e875fc041/data/data6/current/BP-959501690-172.17.0.3-1733577891118 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-07T13:25:41,107 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-07T13:25:41,107 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-07T13:25:41,107 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-07T13:25:41,112 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3c461833{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-07T13:25:41,112 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@33e53d1d{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-07T13:25:41,112 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-07T13:25:41,112 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@ab5393f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-07T13:25:41,112 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@692b8c40{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3a4fca0d-a22f-07a3-d286-965b16cc613d/hadoop.log.dir/,STOPPED} 2024-12-07T13:25:41,119 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-12-07T13:25:41,158 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-12-07T13:25:41,166 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnDatanodeDeath Thread=154 (was 78) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.2@localhost:35785 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-5 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:35785 from jenkins.hfs.3 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-7 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-15-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) 
app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-20-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-5-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-15-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.3@localhost:35785 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-9-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-5-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:35785 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-14-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:35785 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) 
app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:35785 from jenkins.hfs.2 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-6 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Timer for 'DataNode' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: Close-WAL-Writer-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:175) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$893/0x00007f0564bef150.run(Unknown Source) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:35785 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-9-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-9 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-5-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:35785 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-20-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Close-WAL-Writer-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:175) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) 
app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$893/0x00007f0564bef150.run(Unknown Source) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-15-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:35785 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.1@localhost:42747 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-11 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-21-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:42747 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-14-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-14-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:35785 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-20-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-8 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: HMaster-EventLoopGroup-7-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-21-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-10 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:35785 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-21-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=450 (was 402) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=75 (was 156), ProcessCount=11 (was 11), AvailableMemoryMB=15635 (was 16068) 2024-12-07T13:25:41,172 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnPipelineRestart Thread=154, OpenFileDescriptor=450, MaxFileDescriptor=1048576, SystemLoadAverage=75, ProcessCount=11, AvailableMemoryMB=15635 2024-12-07T13:25:41,173 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-07T13:25:41,173 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3a4fca0d-a22f-07a3-d286-965b16cc613d/hadoop.log.dir so I do NOT create it in target/test-data/81f6c458-8c2a-490e-e78c-b838416dbcd5 2024-12-07T13:25:41,173 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3a4fca0d-a22f-07a3-d286-965b16cc613d/hadoop.tmp.dir so I do NOT create it in target/test-data/81f6c458-8c2a-490e-e78c-b838416dbcd5 2024-12-07T13:25:41,173 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/81f6c458-8c2a-490e-e78c-b838416dbcd5/cluster_2d1942de-40e1-2fdd-f068-731fbebee115, deleteOnExit=true 2024-12-07T13:25:41,173 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-12-07T13:25:41,173 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/81f6c458-8c2a-490e-e78c-b838416dbcd5/test.cache.data in system properties and HBase conf 2024-12-07T13:25:41,173 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/81f6c458-8c2a-490e-e78c-b838416dbcd5/hadoop.tmp.dir in system properties and HBase conf 2024-12-07T13:25:41,173 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/81f6c458-8c2a-490e-e78c-b838416dbcd5/hadoop.log.dir in system properties and HBase conf 2024-12-07T13:25:41,173 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/81f6c458-8c2a-490e-e78c-b838416dbcd5/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-07T13:25:41,173 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/81f6c458-8c2a-490e-e78c-b838416dbcd5/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-07T13:25:41,173 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-12-07T13:25:41,173 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not 
a DistributedFileSystem. Skipping on block location reordering 2024-12-07T13:25:41,174 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/81f6c458-8c2a-490e-e78c-b838416dbcd5/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-07T13:25:41,174 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/81f6c458-8c2a-490e-e78c-b838416dbcd5/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-07T13:25:41,174 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/81f6c458-8c2a-490e-e78c-b838416dbcd5/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-07T13:25:41,174 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/81f6c458-8c2a-490e-e78c-b838416dbcd5/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-07T13:25:41,174 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/81f6c458-8c2a-490e-e78c-b838416dbcd5/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-07T13:25:41,174 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/81f6c458-8c2a-490e-e78c-b838416dbcd5/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-07T13:25:41,174 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/81f6c458-8c2a-490e-e78c-b838416dbcd5/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-07T13:25:41,174 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/81f6c458-8c2a-490e-e78c-b838416dbcd5/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-07T13:25:41,174 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/81f6c458-8c2a-490e-e78c-b838416dbcd5/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-07T13:25:41,174 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/81f6c458-8c2a-490e-e78c-b838416dbcd5/nfs.dump.dir in system properties and HBase conf 2024-12-07T13:25:41,174 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/81f6c458-8c2a-490e-e78c-b838416dbcd5/java.io.tmpdir in system properties and HBase conf 2024-12-07T13:25:41,174 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/81f6c458-8c2a-490e-e78c-b838416dbcd5/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-07T13:25:41,174 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/81f6c458-8c2a-490e-e78c-b838416dbcd5/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-07T13:25:41,175 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/81f6c458-8c2a-490e-e78c-b838416dbcd5/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-07T13:25:41,187 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-07T13:25:41,494 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,43699,1733577893481/c7c455b68129%2C43699%2C1733577893481.meta.1733577894498.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:25:41,495 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,42863,1733577894737/c7c455b68129%2C42863%2C1733577894737.1733577894972 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:25:41,529 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-07T13:25:41,533 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-07T13:25:41,535 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-07T13:25:41,535 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-07T13:25:41,535 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-07T13:25:41,535 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-07T13:25:41,536 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@482b04db{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/81f6c458-8c2a-490e-e78c-b838416dbcd5/hadoop.log.dir/,AVAILABLE} 2024-12-07T13:25:41,536 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@60017892{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-07T13:25:41,624 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@37fa3e5a{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/81f6c458-8c2a-490e-e78c-b838416dbcd5/java.io.tmpdir/jetty-localhost-41283-hadoop-hdfs-3_4_1-tests_jar-_-any-8329953234902210500/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-07T13:25:41,624 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@21865735{HTTP/1.1, (http/1.1)}{localhost:41283} 2024-12-07T13:25:41,624 INFO [Time-limited test {}] server.Server(415): Started @159465ms 2024-12-07T13:25:41,634 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-07T13:25:41,890 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-07T13:25:41,893 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-07T13:25:41,894 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-07T13:25:41,894 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-07T13:25:41,894 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-07T13:25:41,895 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2ab2649c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/81f6c458-8c2a-490e-e78c-b838416dbcd5/hadoop.log.dir/,AVAILABLE} 2024-12-07T13:25:41,895 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7e8ebafe{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-07T13:25:41,985 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1fe90fca{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/81f6c458-8c2a-490e-e78c-b838416dbcd5/java.io.tmpdir/jetty-localhost-40937-hadoop-hdfs-3_4_1-tests_jar-_-any-9253131450739351830/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-07T13:25:41,986 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6a724f0c{HTTP/1.1, (http/1.1)}{localhost:40937} 2024-12-07T13:25:41,986 INFO [Time-limited test {}] server.Server(415): Started @159827ms 2024-12-07T13:25:41,988 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-07T13:25:42,015 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-07T13:25:42,017 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-07T13:25:42,018 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-07T13:25:42,018 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-07T13:25:42,018 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-07T13:25:42,019 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@171fa0fa{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/81f6c458-8c2a-490e-e78c-b838416dbcd5/hadoop.log.dir/,AVAILABLE} 2024-12-07T13:25:42,019 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@16a8f4c8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-07T13:25:42,108 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3d22a8c2{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/81f6c458-8c2a-490e-e78c-b838416dbcd5/java.io.tmpdir/jetty-localhost-38535-hadoop-hdfs-3_4_1-tests_jar-_-any-15651722712105711921/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-07T13:25:42,108 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3418a5f1{HTTP/1.1, (http/1.1)}{localhost:38535} 2024-12-07T13:25:42,108 INFO [Time-limited test {}] server.Server(415): Started @159948ms 2024-12-07T13:25:42,109 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-07T13:25:42,495 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,43699,1733577893481/c7c455b68129%2C43699%2C1733577893481.meta.1733577894498.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:25:42,496 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,42863,1733577894737/c7c455b68129%2C42863%2C1733577894737.1733577894972 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-07T13:25:43,228 WARN [Thread-1195 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/81f6c458-8c2a-490e-e78c-b838416dbcd5/cluster_2d1942de-40e1-2fdd-f068-731fbebee115/data/data1/current/BP-1206754118-172.17.0.3-1733577941197/current, will proceed with Du for space computation calculation, 2024-12-07T13:25:43,228 WARN [Thread-1196 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/81f6c458-8c2a-490e-e78c-b838416dbcd5/cluster_2d1942de-40e1-2fdd-f068-731fbebee115/data/data2/current/BP-1206754118-172.17.0.3-1733577941197/current, will proceed with Du for space computation calculation, 2024-12-07T13:25:43,248 WARN [Thread-1159 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-07T13:25:43,250 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x7a4162ad5f7e6353 with lease ID 0x779aeaa5f39a5ddd: Processing first storage report for DS-4c1e617c-9512-4fc9-9d13-fd859d33efc8 from datanode DatanodeRegistration(127.0.0.1:45957, datanodeUuid=b9d6c729-cbc2-4be8-992a-a507464226a8, infoPort=39951, infoSecurePort=0, ipcPort=35547, storageInfo=lv=-57;cid=testClusterID;nsid=1619793722;c=1733577941197) 2024-12-07T13:25:43,250 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x7a4162ad5f7e6353 with lease ID 0x779aeaa5f39a5ddd: from storage DS-4c1e617c-9512-4fc9-9d13-fd859d33efc8 node DatanodeRegistration(127.0.0.1:45957, datanodeUuid=b9d6c729-cbc2-4be8-992a-a507464226a8, infoPort=39951, infoSecurePort=0, ipcPort=35547, storageInfo=lv=-57;cid=testClusterID;nsid=1619793722;c=1733577941197), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-07T13:25:43,250 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x7a4162ad5f7e6353 with lease ID 0x779aeaa5f39a5ddd: Processing first storage report for DS-4af01ffe-413c-4a61-b722-588b40b17a83 from datanode DatanodeRegistration(127.0.0.1:45957, datanodeUuid=b9d6c729-cbc2-4be8-992a-a507464226a8, infoPort=39951, infoSecurePort=0, ipcPort=35547, storageInfo=lv=-57;cid=testClusterID;nsid=1619793722;c=1733577941197) 2024-12-07T13:25:43,250 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x7a4162ad5f7e6353 with lease ID 0x779aeaa5f39a5ddd: from storage DS-4af01ffe-413c-4a61-b722-588b40b17a83 node DatanodeRegistration(127.0.0.1:45957, datanodeUuid=b9d6c729-cbc2-4be8-992a-a507464226a8, infoPort=39951, infoSecurePort=0, ipcPort=35547, storageInfo=lv=-57;cid=testClusterID;nsid=1619793722;c=1733577941197), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-07T13:25:43,268 WARN [Thread-1206 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/81f6c458-8c2a-490e-e78c-b838416dbcd5/cluster_2d1942de-40e1-2fdd-f068-731fbebee115/data/data3/current/BP-1206754118-172.17.0.3-1733577941197/current, will proceed with Du for space computation calculation, 2024-12-07T13:25:43,268 WARN [Thread-1207 {}] impl.BlockPoolSlice(347): dfsUsed file missing in 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/81f6c458-8c2a-490e-e78c-b838416dbcd5/cluster_2d1942de-40e1-2fdd-f068-731fbebee115/data/data4/current/BP-1206754118-172.17.0.3-1733577941197/current, will proceed with Du for space computation calculation, 2024-12-07T13:25:43,290 WARN [Thread-1182 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-07T13:25:43,291 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xfb3d26626d7e24c2 with lease ID 0x779aeaa5f39a5dde: Processing first storage report for DS-ee0760ec-9af4-4b38-8ae1-9d9f41694176 from datanode DatanodeRegistration(127.0.0.1:33149, datanodeUuid=77bea1c3-c274-4930-8d15-a57b4458257f, infoPort=33183, infoSecurePort=0, ipcPort=41183, storageInfo=lv=-57;cid=testClusterID;nsid=1619793722;c=1733577941197) 2024-12-07T13:25:43,292 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xfb3d26626d7e24c2 with lease ID 0x779aeaa5f39a5dde: from storage DS-ee0760ec-9af4-4b38-8ae1-9d9f41694176 node DatanodeRegistration(127.0.0.1:33149, datanodeUuid=77bea1c3-c274-4930-8d15-a57b4458257f, infoPort=33183, infoSecurePort=0, ipcPort=41183, storageInfo=lv=-57;cid=testClusterID;nsid=1619793722;c=1733577941197), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-07T13:25:43,292 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xfb3d26626d7e24c2 with lease ID 0x779aeaa5f39a5dde: Processing first storage report for DS-23dcda92-ae07-48fa-beaf-1da0c14e884a from datanode DatanodeRegistration(127.0.0.1:33149, datanodeUuid=77bea1c3-c274-4930-8d15-a57b4458257f, infoPort=33183, infoSecurePort=0, ipcPort=41183, storageInfo=lv=-57;cid=testClusterID;nsid=1619793722;c=1733577941197) 2024-12-07T13:25:43,292 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xfb3d26626d7e24c2 with lease ID 0x779aeaa5f39a5dde: from storage DS-23dcda92-ae07-48fa-beaf-1da0c14e884a node DatanodeRegistration(127.0.0.1:33149, datanodeUuid=77bea1c3-c274-4930-8d15-a57b4458257f, infoPort=33183, infoSecurePort=0, ipcPort=41183, storageInfo=lv=-57;cid=testClusterID;nsid=1619793722;c=1733577941197), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-07T13:25:43,343 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/81f6c458-8c2a-490e-e78c-b838416dbcd5 2024-12-07T13:25:43,348 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/81f6c458-8c2a-490e-e78c-b838416dbcd5/cluster_2d1942de-40e1-2fdd-f068-731fbebee115/zookeeper_0, clientPort=49551, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/81f6c458-8c2a-490e-e78c-b838416dbcd5/cluster_2d1942de-40e1-2fdd-f068-731fbebee115/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/81f6c458-8c2a-490e-e78c-b838416dbcd5/cluster_2d1942de-40e1-2fdd-f068-731fbebee115/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, 
maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-07T13:25:43,350 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=49551 2024-12-07T13:25:43,350 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T13:25:43,353 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T13:25:43,362 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45957 is added to blk_1073741825_1001 (size=7) 2024-12-07T13:25:43,363 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33149 is added to blk_1073741825_1001 (size=7) 2024-12-07T13:25:43,364 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:39881/user/jenkins/test-data/67b70b5f-1c1f-5937-31c0-bdfce17a3981 with version=8 2024-12-07T13:25:43,364 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:33723/user/jenkins/test-data/3eb2eb4e-7df3-dd45-b018-332cf885bdba/hbase-staging 2024-12-07T13:25:43,367 INFO [Time-limited test {}] client.ConnectionUtils(128): master/c7c455b68129:0 server-side Connection retries=45 2024-12-07T13:25:43,367 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-07T13:25:43,367 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-07T13:25:43,367 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-07T13:25:43,367 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-07T13:25:43,367 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-07T13:25:43,367 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-12-07T13:25:43,368 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-07T13:25:43,368 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:46131 2024-12-07T13:25:43,370 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:46131 connecting to ZooKeeper ensemble=127.0.0.1:49551 2024-12-07T13:25:43,447 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:461310x0, quorum=127.0.0.1:49551, baseZNode=/hbase Received ZooKeeper Event, type=None, 
state=SyncConnected, path=null 2024-12-07T13:25:43,448 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:46131-0x10000742ff90000 connected 2024-12-07T13:25:43,497 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,43699,1733577893481/c7c455b68129%2C43699%2C1733577893481.meta.1733577894498.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:25:43,497 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,42863,1733577894737/c7c455b68129%2C42863%2C1733577894737.1733577894972 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:25:43,599 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T13:25:43,604 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T13:25:43,609 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:46131-0x10000742ff90000, quorum=127.0.0.1:49551, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-07T13:25:43,610 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:39881/user/jenkins/test-data/67b70b5f-1c1f-5937-31c0-bdfce17a3981, hbase.cluster.distributed=false 2024-12-07T13:25:43,611 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:46131-0x10000742ff90000, quorum=127.0.0.1:49551, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-07T13:25:43,612 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=46131 2024-12-07T13:25:43,612 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=46131 2024-12-07T13:25:43,612 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=46131 2024-12-07T13:25:43,613 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=46131 2024-12-07T13:25:43,613 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with 
threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=46131 2024-12-07T13:25:43,625 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/c7c455b68129:0 server-side Connection retries=45 2024-12-07T13:25:43,625 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-07T13:25:43,625 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-07T13:25:43,625 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-07T13:25:43,626 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-07T13:25:43,626 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-07T13:25:43,626 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-07T13:25:43,626 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-07T13:25:43,626 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:40591 2024-12-07T13:25:43,627 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:40591 connecting to ZooKeeper ensemble=127.0.0.1:49551 2024-12-07T13:25:43,628 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T13:25:43,629 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T13:25:43,640 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:405910x0, quorum=127.0.0.1:49551, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-07T13:25:43,640 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:405910x0, quorum=127.0.0.1:49551, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-07T13:25:43,640 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:40591-0x10000742ff90001 connected 2024-12-07T13:25:43,641 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-07T13:25:43,641 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-07T13:25:43,641 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:40591-0x10000742ff90001, quorum=127.0.0.1:49551, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-07T13:25:43,642 DEBUG [Time-limited test {}] 
zookeeper.ZKUtil(113): regionserver:40591-0x10000742ff90001, quorum=127.0.0.1:49551, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-07T13:25:43,643 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=40591 2024-12-07T13:25:43,643 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=40591 2024-12-07T13:25:43,643 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=40591 2024-12-07T13:25:43,644 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=40591 2024-12-07T13:25:43,644 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=40591 2024-12-07T13:25:43,656 DEBUG [M:0;c7c455b68129:46131 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;c7c455b68129:46131 2024-12-07T13:25:43,657 INFO [master/c7c455b68129:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/c7c455b68129,46131,1733577943366 2024-12-07T13:25:43,667 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40591-0x10000742ff90001, quorum=127.0.0.1:49551, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-07T13:25:43,667 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46131-0x10000742ff90000, quorum=127.0.0.1:49551, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-07T13:25:43,667 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:46131-0x10000742ff90000, quorum=127.0.0.1:49551, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/c7c455b68129,46131,1733577943366 2024-12-07T13:25:43,677 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40591-0x10000742ff90001, quorum=127.0.0.1:49551, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-07T13:25:43,677 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46131-0x10000742ff90000, quorum=127.0.0.1:49551, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T13:25:43,677 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40591-0x10000742ff90001, quorum=127.0.0.1:49551, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T13:25:43,678 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:46131-0x10000742ff90000, quorum=127.0.0.1:49551, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-07T13:25:43,679 INFO [master/c7c455b68129:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/c7c455b68129,46131,1733577943366 from backup master directory 2024-12-07T13:25:43,688 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46131-0x10000742ff90000, quorum=127.0.0.1:49551, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, 
path=/hbase/backup-masters/c7c455b68129,46131,1733577943366 2024-12-07T13:25:43,688 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40591-0x10000742ff90001, quorum=127.0.0.1:49551, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-07T13:25:43,688 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46131-0x10000742ff90000, quorum=127.0.0.1:49551, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-07T13:25:43,688 WARN [master/c7c455b68129:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-07T13:25:43,688 INFO [master/c7c455b68129:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=c7c455b68129,46131,1733577943366 2024-12-07T13:25:43,696 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:39881/user/jenkins/test-data/67b70b5f-1c1f-5937-31c0-bdfce17a3981/hbase.id] with ID: b73fa44c-7d55-41e7-b835-c76893bce628 2024-12-07T13:25:43,696 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:39881/user/jenkins/test-data/67b70b5f-1c1f-5937-31c0-bdfce17a3981/.tmp/hbase.id 2024-12-07T13:25:43,701 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33149 is added to blk_1073741826_1002 (size=42) 2024-12-07T13:25:43,702 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45957 is added to blk_1073741826_1002 (size=42) 2024-12-07T13:25:43,702 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:39881/user/jenkins/test-data/67b70b5f-1c1f-5937-31c0-bdfce17a3981/.tmp/hbase.id]:[hdfs://localhost:39881/user/jenkins/test-data/67b70b5f-1c1f-5937-31c0-bdfce17a3981/hbase.id] 2024-12-07T13:25:43,715 INFO [master/c7c455b68129:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T13:25:43,715 INFO [master/c7c455b68129:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-12-07T13:25:43,716 INFO [master/c7c455b68129:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
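
Note on the two util.FSUtils records just above: the cluster ID is first written to a temporary file under .tmp/ and only then moved onto hbase.id, so a reader of hbase.id never observes a partially written file. The sketch below illustrates that same write-then-rename pattern with the stock Hadoop FileSystem API; it is not the exact HBase code path, the paths are placeholders, and the on-disk format of hbase.id is simplified to a plain UTF-8 string for the example.

    import java.io.IOException;
    import java.nio.charset.StandardCharsets;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class ClusterIdWriteSketch {
      public static void writeClusterId(Configuration conf, String clusterId) throws IOException {
        FileSystem fs = FileSystem.get(conf);
        Path tmp = new Path("/hbase/.tmp/hbase.id");   // placeholder locations, not this run's real rootdir
        Path dst = new Path("/hbase/hbase.id");
        // Step 1: write the complete content to the temporary file.
        try (FSDataOutputStream out = fs.create(tmp, true)) {
          out.write(clusterId.getBytes(StandardCharsets.UTF_8));
        }
        // Step 2: publish it with a single rename, so readers see either the old file
        // or the fully written new one, never a partial write.
        if (!fs.rename(tmp, dst)) {
          throw new IOException("Failed to move " + tmp + " to " + dst);
        }
      }
    }

The run above follows the same shape: write to .tmp/hbase.id, then move the file to hbase.id in the root directory.
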
2024-12-07T13:25:43,724 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46131-0x10000742ff90000, quorum=127.0.0.1:49551, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T13:25:43,724 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40591-0x10000742ff90001, quorum=127.0.0.1:49551, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T13:25:43,731 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45957 is added to blk_1073741827_1003 (size=196) 2024-12-07T13:25:43,731 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33149 is added to blk_1073741827_1003 (size=196) 2024-12-07T13:25:43,732 INFO [master/c7c455b68129:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-07T13:25:43,733 INFO [master/c7c455b68129:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-07T13:25:43,733 INFO [master/c7c455b68129:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-07T13:25:43,740 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45957 is added to blk_1073741828_1004 (size=1189) 2024-12-07T13:25:43,741 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33149 is added to blk_1073741828_1004 (size=1189) 2024-12-07T13:25:43,742 INFO [master/c7c455b68129:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:39881/user/jenkins/test-data/67b70b5f-1c1f-5937-31c0-bdfce17a3981/MasterData/data/master/store 2024-12-07T13:25:43,747 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33149 is added to blk_1073741829_1005 (size=34) 2024-12-07T13:25:43,748 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45957 is added to blk_1073741829_1005 (size=34) 2024-12-07T13:25:43,748 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T13:25:43,748 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-07T13:25:43,748 INFO [master/c7c455b68129:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-07T13:25:43,748 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-07T13:25:43,748 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-07T13:25:43,748 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-07T13:25:43,748 INFO [master/c7c455b68129:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
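
The long descriptor dumps above (for master:store here, and for hbase:meta further down) are the same kind of object that client code builds with TableDescriptorBuilder and ColumnFamilyDescriptorBuilder. As a rough sketch, the logged 'info' family (3 versions, ROWCOL bloom filter, in-memory, ROW_INDEX_V1 encoding, 8 KB blocks) could be expressed like this for an ordinary user table; the table name is an illustrative placeholder, and this is not the internal code that creates master:store itself.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class InfoFamilySketch {
      public static TableDescriptor build() {
        // Mirrors the 'info' family attributes printed in the master:store descriptor above.
        ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)
            .setInMemory(true)
            .setBloomFilterType(BloomType.ROWCOL)
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .setBlocksize(8 * 1024)
            .build();
        // "demo_store" is a placeholder; master:store is a private, master-local table.
        return TableDescriptorBuilder.newBuilder(TableName.valueOf("demo_store"))
            .setColumnFamily(info)
            .build();
      }
    }
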
2024-12-07T13:25:43,748 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733577943748Disabling compacts and flushes for region at 1733577943748Disabling writes for close at 1733577943748Writing region close event to WAL at 1733577943748Closed at 1733577943748 2024-12-07T13:25:43,749 WARN [master/c7c455b68129:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:39881/user/jenkins/test-data/67b70b5f-1c1f-5937-31c0-bdfce17a3981/MasterData/data/master/store/.initializing 2024-12-07T13:25:43,749 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:39881/user/jenkins/test-data/67b70b5f-1c1f-5937-31c0-bdfce17a3981/MasterData/WALs/c7c455b68129,46131,1733577943366 2024-12-07T13:25:43,752 INFO [master/c7c455b68129:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=c7c455b68129%2C46131%2C1733577943366, suffix=, logDir=hdfs://localhost:39881/user/jenkins/test-data/67b70b5f-1c1f-5937-31c0-bdfce17a3981/MasterData/WALs/c7c455b68129,46131,1733577943366, archiveDir=hdfs://localhost:39881/user/jenkins/test-data/67b70b5f-1c1f-5937-31c0-bdfce17a3981/MasterData/oldWALs, maxLogs=10 2024-12-07T13:25:43,752 INFO [master/c7c455b68129:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor c7c455b68129%2C46131%2C1733577943366.1733577943752 2024-12-07T13:25:43,756 INFO [master/c7c455b68129:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/67b70b5f-1c1f-5937-31c0-bdfce17a3981/MasterData/WALs/c7c455b68129,46131,1733577943366/c7c455b68129%2C46131%2C1733577943366.1733577943752 2024-12-07T13:25:43,761 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33183:33183),(127.0.0.1/127.0.0.1:39951:39951)] 2024-12-07T13:25:43,761 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-07T13:25:43,762 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T13:25:43,762 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-07T13:25:43,762 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-07T13:25:43,763 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-07T13:25:43,764 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-07T13:25:43,764 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T13:25:43,764 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T13:25:43,764 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-07T13:25:43,765 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-07T13:25:43,765 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T13:25:43,766 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T13:25:43,766 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-07T13:25:43,767 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-07T13:25:43,767 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T13:25:43,767 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T13:25:43,767 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-07T13:25:43,768 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-07T13:25:43,768 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T13:25:43,768 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T13:25:43,769 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-07T13:25:43,769 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39881/user/jenkins/test-data/67b70b5f-1c1f-5937-31c0-bdfce17a3981/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-07T13:25:43,769 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39881/user/jenkins/test-data/67b70b5f-1c1f-5937-31c0-bdfce17a3981/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-07T13:25:43,771 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-07T13:25:43,771 DEBUG [master/c7c455b68129:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-07T13:25:43,771 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-07T13:25:43,772 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-07T13:25:43,774 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39881/user/jenkins/test-data/67b70b5f-1c1f-5937-31c0-bdfce17a3981/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-07T13:25:43,774 INFO [master/c7c455b68129:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=698396, jitterRate=-0.1119445413351059}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-07T13:25:43,774 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733577943762Initializing all the Stores at 1733577943763 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733577943763Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733577943763Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733577943763Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733577943763Cleaning up temporary data from old regions at 1733577943771 (+8 ms)Region opened successfully at 1733577943774 (+3 ms) 2024-12-07T13:25:43,775 INFO [master/c7c455b68129:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-07T13:25:43,777 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@243a5a89, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=c7c455b68129/172.17.0.3:0 2024-12-07T13:25:43,778 INFO [master/c7c455b68129:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-12-07T13:25:43,778 INFO [master/c7c455b68129:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-07T13:25:43,778 INFO [master/c7c455b68129:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-07T13:25:43,778 INFO [master/c7c455b68129:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-07T13:25:43,779 INFO [master/c7c455b68129:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-12-07T13:25:43,779 INFO [master/c7c455b68129:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-12-07T13:25:43,779 INFO [master/c7c455b68129:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-07T13:25:43,782 INFO [master/c7c455b68129:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-07T13:25:43,783 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46131-0x10000742ff90000, quorum=127.0.0.1:49551, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-07T13:25:43,793 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-07T13:25:43,793 INFO [master/c7c455b68129:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-07T13:25:43,794 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46131-0x10000742ff90000, quorum=127.0.0.1:49551, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-07T13:25:43,803 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-07T13:25:43,804 INFO [master/c7c455b68129:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-07T13:25:43,805 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46131-0x10000742ff90000, quorum=127.0.0.1:49551, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-07T13:25:43,814 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-07T13:25:43,815 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46131-0x10000742ff90000, quorum=127.0.0.1:49551, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-07T13:25:43,824 DEBUG 
[master/c7c455b68129:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-07T13:25:43,826 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46131-0x10000742ff90000, quorum=127.0.0.1:49551, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-07T13:25:43,835 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-07T13:25:43,846 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46131-0x10000742ff90000, quorum=127.0.0.1:49551, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-07T13:25:43,846 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40591-0x10000742ff90001, quorum=127.0.0.1:49551, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-07T13:25:43,846 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40591-0x10000742ff90001, quorum=127.0.0.1:49551, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T13:25:43,846 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46131-0x10000742ff90000, quorum=127.0.0.1:49551, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T13:25:43,846 INFO [master/c7c455b68129:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=c7c455b68129,46131,1733577943366, sessionid=0x10000742ff90000, setting cluster-up flag (Was=false) 2024-12-07T13:25:43,867 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46131-0x10000742ff90000, quorum=127.0.0.1:49551, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T13:25:43,867 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40591-0x10000742ff90001, quorum=127.0.0.1:49551, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T13:25:43,898 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-07T13:25:43,900 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=c7c455b68129,46131,1733577943366 2024-12-07T13:25:43,919 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40591-0x10000742ff90001, quorum=127.0.0.1:49551, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T13:25:43,919 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46131-0x10000742ff90000, quorum=127.0.0.1:49551, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T13:25:43,934 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-07T13:25:43,949 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T13:25:43,950 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T13:25:43,950 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T13:25:43,950 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T13:25:43,950 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T13:25:43,951 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T13:25:43,951 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-07T13:25:43,952 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=c7c455b68129,46131,1733577943366 2024-12-07T13:25:43,953 INFO [master/c7c455b68129:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:39881/user/jenkins/test-data/67b70b5f-1c1f-5937-31c0-bdfce17a3981/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-07T13:25:43,953 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T13:25:43,954 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T13:25:43,954 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T13:25:43,954 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-07T13:25:43,955 INFO [master/c7c455b68129:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-07T13:25:43,955 INFO [master/c7c455b68129:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
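
The StochasticLoadBalancer line above echoes its tunables back (maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000). Those values normally come from the HBase configuration; the sketch below sets them programmatically. The key names follow the balancer's commonly documented configuration constants and are stated from memory, so they should be verified against the HBase version actually in use.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class BalancerTuningSketch {
      public static Configuration tuned() {
        Configuration conf = HBaseConfiguration.create();
        // Key names assumed from StochasticLoadBalancer's configuration constants; verify per release.
        conf.setInt("hbase.master.balancer.stochastic.maxSteps", 1_000_000);
        conf.setBoolean("hbase.master.balancer.stochastic.runMaxSteps", false);
        conf.setInt("hbase.master.balancer.stochastic.stepsPerRegion", 800);
        conf.setLong("hbase.master.balancer.stochastic.maxRunningTime", 30_000L);
        return conf;
      }
    }
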
2024-12-07T13:25:43,955 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: c7c455b68129,46131,1733577943366 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-07T13:25:43,956 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T13:25:43,956 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/c7c455b68129:0, corePoolSize=5, maxPoolSize=5 2024-12-07T13:25:43,956 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/c7c455b68129:0, corePoolSize=5, maxPoolSize=5 2024-12-07T13:25:43,956 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/c7c455b68129:0, corePoolSize=5, maxPoolSize=5 2024-12-07T13:25:43,956 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/c7c455b68129:0, corePoolSize=5, maxPoolSize=5 2024-12-07T13:25:43,957 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/c7c455b68129:0, corePoolSize=10, maxPoolSize=10 2024-12-07T13:25:43,957 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/c7c455b68129:0, corePoolSize=1, maxPoolSize=1 2024-12-07T13:25:43,957 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/c7c455b68129:0, corePoolSize=2, maxPoolSize=2 2024-12-07T13:25:43,957 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/c7c455b68129:0, corePoolSize=1, maxPoolSize=1 2024-12-07T13:25:43,958 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-07T13:25:43,958 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-07T13:25:43,959 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T13:25:43,959 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING 
=> 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-07T13:25:43,960 INFO [master/c7c455b68129:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733577973960 2024-12-07T13:25:43,960 INFO [master/c7c455b68129:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-07T13:25:43,960 INFO [master/c7c455b68129:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-07T13:25:43,960 INFO [master/c7c455b68129:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-07T13:25:43,960 INFO [master/c7c455b68129:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-07T13:25:43,961 INFO [master/c7c455b68129:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-07T13:25:43,961 INFO [master/c7c455b68129:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-07T13:25:43,961 INFO [master/c7c455b68129:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
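
The ChoreService record above registers a periodic cleaner (LogsCleaner, period=600000 ms). A minimal sketch of the same ScheduledChore/ChoreService pattern, assuming hbase-common on the classpath; the chore name, period, and body here are placeholders rather than any of the cleaners listed in this log.

    import org.apache.hadoop.hbase.ChoreService;
    import org.apache.hadoop.hbase.ScheduledChore;
    import org.apache.hadoop.hbase.Stoppable;

    public class ChoreSketch {
      public static void main(String[] args) {
        // A trivial Stoppable so the chore can be told to stop.
        Stoppable stopper = new Stoppable() {
          private volatile boolean stopped;
          @Override public void stop(String why) { stopped = true; }
          @Override public boolean isStopped() { return stopped; }
        };
        // Fires every 600000 ms, matching the LogsCleaner period reported above.
        ScheduledChore demo = new ScheduledChore("demo-cleaner", stopper, 600_000) {
          @Override protected void chore() {
            // Periodic cleanup work goes here.
          }
        };
        ChoreService service = new ChoreService("demo");
        service.scheduleChore(demo);
        // Later: stopper.stop("test done"); service.shutdown();
      }
    }
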
2024-12-07T13:25:43,965 INFO [master/c7c455b68129:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-07T13:25:43,965 INFO [master/c7c455b68129:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-07T13:25:43,965 INFO [master/c7c455b68129:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-07T13:25:43,965 INFO [master/c7c455b68129:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-07T13:25:43,965 INFO [master/c7c455b68129:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-07T13:25:43,965 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/c7c455b68129:0:becomeActiveMaster-HFileCleaner.large.0-1733577943965,5,FailOnTimeoutGroup] 2024-12-07T13:25:43,966 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/c7c455b68129:0:becomeActiveMaster-HFileCleaner.small.0-1733577943965,5,FailOnTimeoutGroup] 2024-12-07T13:25:43,966 INFO [master/c7c455b68129:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-07T13:25:43,966 INFO [master/c7c455b68129:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-07T13:25:43,966 INFO [master/c7c455b68129:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-07T13:25:43,966 INFO [master/c7c455b68129:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
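Annotation: the "Reopening regions with very high storeFileRefCount is disabled" line above refers to the hbase.regions.recovery.store.file.ref.count key named in the message itself. A minimal sketch of setting it through a plain Configuration; 256 is an arbitrary illustrative threshold, not a recommendation.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class StoreFileRefCountRecoveryConfig {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // The default is <= 0, which the master above reports as "disabled".
    // A positive threshold (256 here, purely illustrative) enables reopening
    // regions whose store file reference count exceeds it.
    conf.setInt("hbase.regions.recovery.store.file.ref.count", 256);
    System.out.println(conf.getInt("hbase.regions.recovery.store.file.ref.count", -1));
  }
}
```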
2024-12-07T13:25:43,970 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33149 is added to blk_1073741831_1007 (size=1321) 2024-12-07T13:25:43,970 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45957 is added to blk_1073741831_1007 (size=1321) 2024-12-07T13:25:43,971 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:39881/user/jenkins/test-data/67b70b5f-1c1f-5937-31c0-bdfce17a3981/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-07T13:25:43,972 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:39881/user/jenkins/test-data/67b70b5f-1c1f-5937-31c0-bdfce17a3981 2024-12-07T13:25:43,982 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33149 is added to blk_1073741832_1008 (size=32) 2024-12-07T13:25:43,982 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45957 is added to blk_1073741832_1008 (size=32) 2024-12-07T13:25:43,983 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T13:25:43,984 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-07T13:25:43,986 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; 
off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-07T13:25:43,986 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T13:25:43,986 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T13:25:43,986 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-07T13:25:43,988 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-07T13:25:43,988 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T13:25:43,988 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T13:25:43,988 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-07T13:25:43,990 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-07T13:25:43,990 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T13:25:43,990 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T13:25:43,990 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-07T13:25:43,992 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-07T13:25:43,992 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T13:25:43,992 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T13:25:43,992 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-07T13:25:43,993 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39881/user/jenkins/test-data/67b70b5f-1c1f-5937-31c0-bdfce17a3981/data/hbase/meta/1588230740 2024-12-07T13:25:43,993 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39881/user/jenkins/test-data/67b70b5f-1c1f-5937-31c0-bdfce17a3981/data/hbase/meta/1588230740 2024-12-07T13:25:43,994 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-07T13:25:43,994 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-07T13:25:43,995 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
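Annotation: the CompactionConfiguration dumps above (minFilesToCompact:3, maxFilesToCompact:10, ratio 1.2, major period 604800000 ms, major jitter 0.5) correspond to the usual compaction settings. A sketch that reads them back; treat the exact key names as assumptions to check against your HBase version.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionConfigSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Defaults shown here match the values in the CompactionConfiguration dump above.
    System.out.println(conf.getInt("hbase.hstore.compaction.min", 3));            // minFilesToCompact
    System.out.println(conf.getInt("hbase.hstore.compaction.max", 10));           // maxFilesToCompact
    System.out.println(conf.getFloat("hbase.hstore.compaction.ratio", 1.2f));     // ratio
    System.out.println(conf.getLong("hbase.hregion.majorcompaction", 604800000L));       // major period (7 days)
    System.out.println(conf.getFloat("hbase.hregion.majorcompaction.jitter", 0.5f));     // major jitter
  }
}
```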
2024-12-07T13:25:43,996 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-07T13:25:43,997 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39881/user/jenkins/test-data/67b70b5f-1c1f-5937-31c0-bdfce17a3981/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-07T13:25:43,998 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=859074, jitterRate=0.09236934781074524}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-07T13:25:43,998 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733577943983Initializing all the Stores at 1733577943984 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733577943984Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733577943984Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733577943984Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733577943984Cleaning up temporary data from old regions at 1733577943994 (+10 ms)Region opened successfully at 1733577943998 (+4 ms) 2024-12-07T13:25:43,999 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-07T13:25:43,999 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-07T13:25:43,999 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-07T13:25:43,999 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-07T13:25:43,999 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-07T13:25:43,999 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-07T13:25:43,999 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733577943998Disabling compacts and flushes for region at 1733577943998Disabling writes for close at 1733577943999 (+1 ms)Writing 
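Annotation: desiredMaxFileSize=859074 in the split-policy line above appears to be the configured MAX_FILESIZE (786432, see the TableDescriptorChecker warning near the end of this section) scaled by the logged jitterRate. Assuming that relationship, which these numbers bear out, the arithmetic is:

```java
public class SplitSizeJitterCheck {
  public static void main(String[] args) {
    // From the log: configured max file size 786432 and jitterRate 0.09236934781074524.
    long configuredMaxFileSize = 786432L;
    double jitterRate = 0.09236934781074524;
    long desired = (long) (configuredMaxFileSize * (1 + jitterRate));
    System.out.println(desired); // 859074, matching desiredMaxFileSize above
  }
}
```

The second open of the same region later in this section (desiredMaxFileSize=828608, jitterRate=0.05362983047962189) fits the same formula, so the jitter is re-drawn on each open.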
region close event to WAL at 1733577943999Closed at 1733577943999 2024-12-07T13:25:44,000 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-07T13:25:44,000 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-07T13:25:44,001 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-07T13:25:44,002 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-07T13:25:44,003 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-07T13:25:44,046 INFO [RS:0;c7c455b68129:40591 {}] regionserver.HRegionServer(746): ClusterId : b73fa44c-7d55-41e7-b835-c76893bce628 2024-12-07T13:25:44,046 DEBUG [RS:0;c7c455b68129:40591 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-07T13:25:44,110 DEBUG [RS:0;c7c455b68129:40591 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-07T13:25:44,110 DEBUG [RS:0;c7c455b68129:40591 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-07T13:25:44,144 DEBUG [RS:0;c7c455b68129:40591 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-07T13:25:44,145 DEBUG [RS:0;c7c455b68129:40591 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@11555a96, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=c7c455b68129/172.17.0.3:0 2024-12-07T13:25:44,153 WARN [c7c455b68129:46131 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-12-07T13:25:44,166 DEBUG [RS:0;c7c455b68129:40591 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;c7c455b68129:40591 2024-12-07T13:25:44,166 INFO [RS:0;c7c455b68129:40591 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-07T13:25:44,166 INFO [RS:0;c7c455b68129:40591 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-07T13:25:44,166 DEBUG [RS:0;c7c455b68129:40591 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-12-07T13:25:44,167 INFO [RS:0;c7c455b68129:40591 {}] regionserver.HRegionServer(2659): reportForDuty to master=c7c455b68129,46131,1733577943366 with port=40591, startcode=1733577943625 2024-12-07T13:25:44,168 DEBUG [RS:0;c7c455b68129:40591 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-07T13:25:44,169 INFO [HMaster-EventLoopGroup-10-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:58175, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.4 (auth:SIMPLE), service=RegionServerStatusService 2024-12-07T13:25:44,170 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46131 {}] master.ServerManager(363): Checking decommissioned status of RegionServer c7c455b68129,40591,1733577943625 2024-12-07T13:25:44,170 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46131 {}] master.ServerManager(517): Registering regionserver=c7c455b68129,40591,1733577943625 2024-12-07T13:25:44,171 DEBUG [RS:0;c7c455b68129:40591 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:39881/user/jenkins/test-data/67b70b5f-1c1f-5937-31c0-bdfce17a3981 2024-12-07T13:25:44,171 DEBUG [RS:0;c7c455b68129:40591 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:39881 2024-12-07T13:25:44,172 DEBUG [RS:0;c7c455b68129:40591 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-07T13:25:44,183 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46131-0x10000742ff90000, quorum=127.0.0.1:49551, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-07T13:25:44,183 DEBUG [RS:0;c7c455b68129:40591 {}] zookeeper.ZKUtil(111): regionserver:40591-0x10000742ff90001, quorum=127.0.0.1:49551, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/c7c455b68129,40591,1733577943625 2024-12-07T13:25:44,183 WARN [RS:0;c7c455b68129:40591 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-07T13:25:44,183 INFO [RS:0;c7c455b68129:40591 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-07T13:25:44,183 DEBUG [RS:0;c7c455b68129:40591 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:39881/user/jenkins/test-data/67b70b5f-1c1f-5937-31c0-bdfce17a3981/WALs/c7c455b68129,40591,1733577943625 2024-12-07T13:25:44,184 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [c7c455b68129,40591,1733577943625] 2024-12-07T13:25:44,187 INFO [RS:0;c7c455b68129:40591 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-07T13:25:44,188 INFO [RS:0;c7c455b68129:40591 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-07T13:25:44,188 INFO [RS:0;c7c455b68129:40591 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-07T13:25:44,188 INFO [RS:0;c7c455b68129:40591 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
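Annotation: the "/hbase/rs/..." znode and "RegionServer ephemeral node created" lines above describe an ephemeral ZooKeeper registration that the master's RegionServerTracker watches. A bare ZooKeeper-client sketch of the same idea; the quorum address and znode path here are hypothetical, and running it needs a live ZooKeeper.

```java
import org.apache.zookeeper.CreateMode;
import org.apache.zookeeper.ZooDefs;
import org.apache.zookeeper.ZooKeeper;

public class EphemeralRsNodeSketch {
  public static void main(String[] args) throws Exception {
    // The test above uses 127.0.0.1:49551 and /hbase/rs/<host>,<port>,<startcode>;
    // both values below are placeholders.
    ZooKeeper zk = new ZooKeeper("127.0.0.1:2181", 30000, event -> {
      // The master reacts to NodeChildrenChanged events on /hbase/rs much like
      // this callback would.
      System.out.println("ZK event: " + event);
    });
    zk.create("/hbase/rs/example-host,16020,1733577943625",
        new byte[0], ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);
    zk.close(); // closing the session removes the ephemeral node, as a crash would
  }
}
```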
2024-12-07T13:25:44,188 INFO [RS:0;c7c455b68129:40591 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-07T13:25:44,189 INFO [RS:0;c7c455b68129:40591 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-07T13:25:44,189 INFO [RS:0;c7c455b68129:40591 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-07T13:25:44,189 DEBUG [RS:0;c7c455b68129:40591 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/c7c455b68129:0, corePoolSize=1, maxPoolSize=1 2024-12-07T13:25:44,189 DEBUG [RS:0;c7c455b68129:40591 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/c7c455b68129:0, corePoolSize=1, maxPoolSize=1 2024-12-07T13:25:44,189 DEBUG [RS:0;c7c455b68129:40591 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/c7c455b68129:0, corePoolSize=1, maxPoolSize=1 2024-12-07T13:25:44,190 DEBUG [RS:0;c7c455b68129:40591 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/c7c455b68129:0, corePoolSize=1, maxPoolSize=1 2024-12-07T13:25:44,190 DEBUG [RS:0;c7c455b68129:40591 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/c7c455b68129:0, corePoolSize=1, maxPoolSize=1 2024-12-07T13:25:44,190 DEBUG [RS:0;c7c455b68129:40591 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/c7c455b68129:0, corePoolSize=2, maxPoolSize=2 2024-12-07T13:25:44,190 DEBUG [RS:0;c7c455b68129:40591 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/c7c455b68129:0, corePoolSize=1, maxPoolSize=1 2024-12-07T13:25:44,190 DEBUG [RS:0;c7c455b68129:40591 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/c7c455b68129:0, corePoolSize=1, maxPoolSize=1 2024-12-07T13:25:44,190 DEBUG [RS:0;c7c455b68129:40591 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/c7c455b68129:0, corePoolSize=1, maxPoolSize=1 2024-12-07T13:25:44,190 DEBUG [RS:0;c7c455b68129:40591 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/c7c455b68129:0, corePoolSize=1, maxPoolSize=1 2024-12-07T13:25:44,190 DEBUG [RS:0;c7c455b68129:40591 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/c7c455b68129:0, corePoolSize=1, maxPoolSize=1 2024-12-07T13:25:44,190 DEBUG [RS:0;c7c455b68129:40591 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/c7c455b68129:0, corePoolSize=1, maxPoolSize=1 2024-12-07T13:25:44,190 DEBUG [RS:0;c7c455b68129:40591 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/c7c455b68129:0, corePoolSize=3, maxPoolSize=3 2024-12-07T13:25:44,190 DEBUG [RS:0;c7c455b68129:40591 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/c7c455b68129:0, corePoolSize=3, maxPoolSize=3 2024-12-07T13:25:44,190 INFO [RS:0;c7c455b68129:40591 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-12-07T13:25:44,190 INFO [RS:0;c7c455b68129:40591 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-07T13:25:44,190 INFO [RS:0;c7c455b68129:40591 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-07T13:25:44,190 INFO [RS:0;c7c455b68129:40591 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-07T13:25:44,191 INFO [RS:0;c7c455b68129:40591 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-07T13:25:44,191 INFO [RS:0;c7c455b68129:40591 {}] hbase.ChoreService(168): Chore ScheduledChore name=c7c455b68129,40591,1733577943625-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-07T13:25:44,205 INFO [RS:0;c7c455b68129:40591 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-07T13:25:44,205 INFO [RS:0;c7c455b68129:40591 {}] hbase.ChoreService(168): Chore ScheduledChore name=c7c455b68129,40591,1733577943625-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-07T13:25:44,205 INFO [RS:0;c7c455b68129:40591 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-07T13:25:44,205 INFO [RS:0;c7c455b68129:40591 {}] regionserver.Replication(171): c7c455b68129,40591,1733577943625 started 2024-12-07T13:25:44,216 INFO [RS:0;c7c455b68129:40591 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-07T13:25:44,216 INFO [RS:0;c7c455b68129:40591 {}] regionserver.HRegionServer(1482): Serving as c7c455b68129,40591,1733577943625, RpcServer on c7c455b68129/172.17.0.3:40591, sessionid=0x10000742ff90001 2024-12-07T13:25:44,216 DEBUG [RS:0;c7c455b68129:40591 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-07T13:25:44,216 DEBUG [RS:0;c7c455b68129:40591 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager c7c455b68129,40591,1733577943625 2024-12-07T13:25:44,216 DEBUG [RS:0;c7c455b68129:40591 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'c7c455b68129,40591,1733577943625' 2024-12-07T13:25:44,216 DEBUG [RS:0;c7c455b68129:40591 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-07T13:25:44,216 DEBUG [RS:0;c7c455b68129:40591 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-07T13:25:44,217 DEBUG [RS:0;c7c455b68129:40591 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-07T13:25:44,217 DEBUG [RS:0;c7c455b68129:40591 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-07T13:25:44,217 DEBUG [RS:0;c7c455b68129:40591 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager c7c455b68129,40591,1733577943625 2024-12-07T13:25:44,217 DEBUG [RS:0;c7c455b68129:40591 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'c7c455b68129,40591,1733577943625' 2024-12-07T13:25:44,217 DEBUG [RS:0;c7c455b68129:40591 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-07T13:25:44,217 DEBUG 
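Annotation: the "Starting executor service name=RS_OPEN_REGION-..., corePoolSize=1, maxPoolSize=1" lines above are per-event-type thread pools. The sketch below is a plain java.util.concurrent analogue of one such pool, not HBase's own ExecutorService class.

```java
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class RegionServerStyleExecutorSketch {
  public static void main(String[] args) {
    // Analogue of a corePoolSize=1, maxPoolSize=1 handler pool fed from an
    // unbounded queue, one pool per event type.
    ThreadPoolExecutor openRegionPool = new ThreadPoolExecutor(
        1, 1, 60L, TimeUnit.SECONDS, new LinkedBlockingQueue<>());
    openRegionPool.submit(() -> System.out.println("open-region handler running"));
    openRegionPool.shutdown();
  }
}
```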
[RS:0;c7c455b68129:40591 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-07T13:25:44,218 DEBUG [RS:0;c7c455b68129:40591 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-07T13:25:44,218 INFO [RS:0;c7c455b68129:40591 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-07T13:25:44,218 INFO [RS:0;c7c455b68129:40591 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-07T13:25:44,322 INFO [RS:0;c7c455b68129:40591 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=c7c455b68129%2C40591%2C1733577943625, suffix=, logDir=hdfs://localhost:39881/user/jenkins/test-data/67b70b5f-1c1f-5937-31c0-bdfce17a3981/WALs/c7c455b68129,40591,1733577943625, archiveDir=hdfs://localhost:39881/user/jenkins/test-data/67b70b5f-1c1f-5937-31c0-bdfce17a3981/oldWALs, maxLogs=32 2024-12-07T13:25:44,324 INFO [RS:0;c7c455b68129:40591 {}] monitor.StreamSlowMonitor(122): New stream slow monitor c7c455b68129%2C40591%2C1733577943625.1733577944324 2024-12-07T13:25:44,331 INFO [RS:0;c7c455b68129:40591 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/67b70b5f-1c1f-5937-31c0-bdfce17a3981/WALs/c7c455b68129,40591,1733577943625/c7c455b68129%2C40591%2C1733577943625.1733577944324 2024-12-07T13:25:44,335 DEBUG [RS:0;c7c455b68129:40591 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39951:39951),(127.0.0.1/127.0.0.1:33183:33183)] 2024-12-07T13:25:44,403 DEBUG [c7c455b68129:46131 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-12-07T13:25:44,404 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=c7c455b68129,40591,1733577943625 2024-12-07T13:25:44,405 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as c7c455b68129,40591,1733577943625, state=OPENING 2024-12-07T13:25:44,414 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-07T13:25:44,425 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40591-0x10000742ff90001, quorum=127.0.0.1:49551, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T13:25:44,425 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46131-0x10000742ff90000, quorum=127.0.0.1:49551, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T13:25:44,425 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-07T13:25:44,425 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-07T13:25:44,425 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-07T13:25:44,426 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 
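Annotation: the "WAL configuration: blocksize=256 MB, rollsize=128 MB, ..., maxLogs=32" line above reflects the log-roll settings; rollsize is the block size scaled by a roll multiplier. A sketch of that relationship; the key names are assumptions to verify against your HBase version.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class WalRollConfigSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // With a 256 MB WAL block size and a 0.5 multiplier this yields the
    // 128 MB rollsize logged above.
    long blockSize = 256L * 1024 * 1024;
    float multiplier = conf.getFloat("hbase.regionserver.logroll.multiplier", 0.5f);
    int maxLogs = conf.getInt("hbase.regionserver.maxlogs", 32);
    System.out.println("rollsize=" + (long) (blockSize * multiplier) + " maxLogs=" + maxLogs);
  }
}
```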
1588230740, server=c7c455b68129,40591,1733577943625}] 2024-12-07T13:25:44,499 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,42863,1733577894737/c7c455b68129%2C42863%2C1733577894737.1733577894972 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:25:44,499 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,43699,1733577893481/c7c455b68129%2C43699%2C1733577893481.meta.1733577894498.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:25:44,581 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-07T13:25:44,584 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:45341, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-07T13:25:44,590 INFO [RS_OPEN_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-07T13:25:44,590 INFO [RS_OPEN_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-07T13:25:44,592 INFO [RS_OPEN_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=c7c455b68129%2C40591%2C1733577943625.meta, suffix=.meta, logDir=hdfs://localhost:39881/user/jenkins/test-data/67b70b5f-1c1f-5937-31c0-bdfce17a3981/WALs/c7c455b68129,40591,1733577943625, archiveDir=hdfs://localhost:39881/user/jenkins/test-data/67b70b5f-1c1f-5937-31c0-bdfce17a3981/oldWALs, maxLogs=32 2024-12-07T13:25:44,593 INFO [RS_OPEN_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor c7c455b68129%2C40591%2C1733577943625.meta.1733577944592.meta 2024-12-07T13:25:44,598 INFO [RS_OPEN_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/67b70b5f-1c1f-5937-31c0-bdfce17a3981/WALs/c7c455b68129,40591,1733577943625/c7c455b68129%2C40591%2C1733577943625.meta.1733577944592.meta 2024-12-07T13:25:44,601 DEBUG [RS_OPEN_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33183:33183),(127.0.0.1/127.0.0.1:39951:39951)] 2024-12-07T13:25:44,602 DEBUG [RS_OPEN_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-07T13:25:44,602 DEBUG [RS_OPEN_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class 
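Annotation: the two RecoverLeaseFSUtils stack traces above fail because the reflective isFileClosed probe runs against a DFS client whose filesystem was already closed by an earlier test ("Filesystem closed"). The public call being reflected is DistributedFileSystem.isFileClosed; a sketch with hypothetical namenode and path values, assuming a reachable HDFS.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class IsFileClosedSketch {
  public static void main(String[] args) throws Exception {
    // The trace above targets hdfs://localhost:35785/.../WALs/...; both the
    // namenode URI and path below are placeholders.
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(new java.net.URI("hdfs://localhost:8020"), conf);
    if (fs instanceof DistributedFileSystem) {
      boolean closed = ((DistributedFileSystem) fs).isFileClosed(new Path("/path/to/wal"));
      System.out.println("closed=" + closed);
    }
    // If the filesystem had already been closed, as in the cause above,
    // this call would throw IOException instead of returning a result.
  }
}
```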
org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-07T13:25:44,602 DEBUG [RS_OPEN_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-07T13:25:44,602 INFO [RS_OPEN_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-12-07T13:25:44,602 DEBUG [RS_OPEN_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-07T13:25:44,602 DEBUG [RS_OPEN_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T13:25:44,602 DEBUG [RS_OPEN_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-07T13:25:44,602 DEBUG [RS_OPEN_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-07T13:25:44,604 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-07T13:25:44,605 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-07T13:25:44,605 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T13:25:44,605 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T13:25:44,605 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-07T13:25:44,606 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, 
maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-07T13:25:44,606 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T13:25:44,606 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T13:25:44,606 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-07T13:25:44,607 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-07T13:25:44,607 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T13:25:44,607 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T13:25:44,608 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-07T13:25:44,608 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-07T13:25:44,608 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T13:25:44,609 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T13:25:44,609 DEBUG [RS_OPEN_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-07T13:25:44,609 DEBUG [RS_OPEN_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39881/user/jenkins/test-data/67b70b5f-1c1f-5937-31c0-bdfce17a3981/data/hbase/meta/1588230740 2024-12-07T13:25:44,611 DEBUG [RS_OPEN_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39881/user/jenkins/test-data/67b70b5f-1c1f-5937-31c0-bdfce17a3981/data/hbase/meta/1588230740 2024-12-07T13:25:44,612 DEBUG [RS_OPEN_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-07T13:25:44,612 DEBUG [RS_OPEN_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-07T13:25:44,613 DEBUG [RS_OPEN_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
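Annotation: the coprocessor lines above show MultiRowMutationEndpoint being loaded from the meta descriptor with priority 536870911. Attaching a coprocessor class to a (hypothetical) user table descriptor looks roughly like this; the class name is reused from the log only as a concrete example.

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class CoprocessorAttachSketch {
  public static void main(String[] args) throws Exception {
    // Attaching the endpoint class named in the meta descriptor above; user
    // tables normally would not need MultiRowMutationEndpoint.
    TableDescriptor td = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("demo_table")) // hypothetical table
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
        .setCoprocessor("org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint")
        .build();
    System.out.println(td.getCoprocessorDescriptors());
  }
}
```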
2024-12-07T13:25:44,614 DEBUG [RS_OPEN_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-07T13:25:44,615 INFO [RS_OPEN_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=828608, jitterRate=0.05362983047962189}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-07T13:25:44,615 DEBUG [RS_OPEN_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-07T13:25:44,615 DEBUG [RS_OPEN_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733577944602Writing region info on filesystem at 1733577944602Initializing all the Stores at 1733577944603 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733577944603Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733577944604 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733577944604Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733577944604Cleaning up temporary data from old regions at 1733577944612 (+8 ms)Running coprocessor post-open hooks at 1733577944615 (+3 ms)Region opened successfully at 1733577944615 2024-12-07T13:25:44,616 INFO [RS_OPEN_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733577944581 2024-12-07T13:25:44,618 DEBUG [RS_OPEN_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-07T13:25:44,619 INFO [RS_OPEN_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-07T13:25:44,619 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, 
regionLocation=c7c455b68129,40591,1733577943625 2024-12-07T13:25:44,620 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as c7c455b68129,40591,1733577943625, state=OPEN 2024-12-07T13:25:44,712 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46131-0x10000742ff90000, quorum=127.0.0.1:49551, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-07T13:25:44,712 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40591-0x10000742ff90001, quorum=127.0.0.1:49551, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-07T13:25:44,712 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=c7c455b68129,40591,1733577943625 2024-12-07T13:25:44,712 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-07T13:25:44,712 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-07T13:25:44,720 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-07T13:25:44,720 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=c7c455b68129,40591,1733577943625 in 287 msec 2024-12-07T13:25:44,723 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-07T13:25:44,723 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 719 msec 2024-12-07T13:25:44,724 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-07T13:25:44,724 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-07T13:25:44,725 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-07T13:25:44,725 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=c7c455b68129,40591,1733577943625, seqNum=-1] 2024-12-07T13:25:44,726 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T13:25:44,727 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:54499, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T13:25:44,732 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 777 msec 2024-12-07T13:25:44,732 INFO [master/c7c455b68129:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733577944732, completionTime=-1 2024-12-07T13:25:44,732 INFO 
[master/c7c455b68129:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-12-07T13:25:44,732 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-12-07T13:25:44,734 INFO [master/c7c455b68129:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-12-07T13:25:44,734 INFO [master/c7c455b68129:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733578004734 2024-12-07T13:25:44,734 INFO [master/c7c455b68129:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733578064734 2024-12-07T13:25:44,734 INFO [master/c7c455b68129:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 1 msec 2024-12-07T13:25:44,734 INFO [master/c7c455b68129:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c7c455b68129,46131,1733577943366-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-07T13:25:44,734 INFO [master/c7c455b68129:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c7c455b68129,46131,1733577943366-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-07T13:25:44,735 INFO [master/c7c455b68129:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c7c455b68129,46131,1733577943366-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-07T13:25:44,735 INFO [master/c7c455b68129:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-c7c455b68129:46131, period=300000, unit=MILLISECONDS is enabled. 2024-12-07T13:25:44,735 INFO [master/c7c455b68129:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-07T13:25:44,735 INFO [master/c7c455b68129:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-12-07T13:25:44,736 DEBUG [master/c7c455b68129:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-07T13:25:44,739 INFO [master/c7c455b68129:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.050sec 2024-12-07T13:25:44,739 INFO [master/c7c455b68129:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-07T13:25:44,739 INFO [master/c7c455b68129:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-07T13:25:44,739 INFO [master/c7c455b68129:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-07T13:25:44,739 INFO [master/c7c455b68129:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
2024-12-07T13:25:44,739 INFO [master/c7c455b68129:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-07T13:25:44,739 INFO [master/c7c455b68129:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c7c455b68129,46131,1733577943366-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-07T13:25:44,739 INFO [master/c7c455b68129:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c7c455b68129,46131,1733577943366-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-07T13:25:44,742 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-07T13:25:44,742 INFO [master/c7c455b68129:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-07T13:25:44,742 INFO [master/c7c455b68129:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c7c455b68129,46131,1733577943366-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-07T13:25:44,746 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@27265229, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T13:25:44,746 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request c7c455b68129,46131,-1 for getting cluster id 2024-12-07T13:25:44,746 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-07T13:25:44,748 DEBUG [HMaster-EventLoopGroup-10-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'b73fa44c-7d55-41e7-b835-c76893bce628' 2024-12-07T13:25:44,749 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-07T13:25:44,749 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "b73fa44c-7d55-41e7-b835-c76893bce628" 2024-12-07T13:25:44,749 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2539d110, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T13:25:44,749 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [c7c455b68129,46131,-1] 2024-12-07T13:25:44,749 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-07T13:25:44,750 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T13:25:44,751 INFO [HMaster-EventLoopGroup-10-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:38550, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-07T13:25:44,752 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@542bdd12, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, 
writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T13:25:44,752 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-07T13:25:44,753 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=c7c455b68129,40591,1733577943625, seqNum=-1] 2024-12-07T13:25:44,754 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T13:25:44,756 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:48448, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T13:25:44,757 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=c7c455b68129,46131,1733577943366 2024-12-07T13:25:44,758 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T13:25:44,760 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-12-07T13:25:44,760 INFO [Time-limited test {}] wal.TestLogRolling(320): Starting testLogRollOnPipelineRestart 2024-12-07T13:25:44,760 INFO [Time-limited test {}] wal.TestLogRolling(323): Replication=2 2024-12-07T13:25:44,760 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-12-07T13:25:44,761 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.AsyncConnectionImpl(321): The fetched master address is c7c455b68129,46131,1733577943366 2024-12-07T13:25:44,761 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@441e9c57 2024-12-07T13:25:44,761 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-07T13:25:44,763 INFO [HMaster-EventLoopGroup-10-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:38566, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-07T13:25:44,763 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46131 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-12-07T13:25:44,763 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46131 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
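The two TableDescriptorChecker warnings above are expected here: hbase.hregion.max.filesize (786432) and hbase.hregion.memstore.flush.size (8192) are deliberately tiny in this test setup so that flushes and WAL rolls happen quickly. A minimal sketch of setting those same values on a test Configuration is shown below; the class and method names (TinyRegionConf, tinyRegionConf) are invented for illustration and are not taken from the test source.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class TinyRegionConf {
      // Returns a Configuration carrying the same deliberately small sizes that
      // triggered the TableDescriptorChecker warnings in the log above.
      public static Configuration tinyRegionConf() {
        Configuration conf = HBaseConfiguration.create();
        conf.setLong("hbase.hregion.max.filesize", 786432L);      // ~768 KB instead of the usual multi-GB default
        conf.setLong("hbase.hregion.memstore.flush.size", 8192L); // 8 KB instead of the usual 128 MB
        return conf;
      }
    }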
2024-12-07T13:25:44,764 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46131 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.3 create 'TestLogRolling-testLogRollOnPipelineRestart', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-07T13:25:44,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46131 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart 2024-12-07T13:25:44,766 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_PRE_OPERATION 2024-12-07T13:25:44,766 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T13:25:44,766 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46131 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.3 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRollOnPipelineRestart" procId is: 4 2024-12-07T13:25:44,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46131 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-07T13:25:44,768 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-07T13:25:44,776 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33149 is added to blk_1073741835_1011 (size=395) 2024-12-07T13:25:44,776 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45957 is added to blk_1073741835_1011 (size=395) 2024-12-07T13:25:44,778 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 29aae1c60a6c9ce3ac6d0c7cf740706e, NAME => 'TestLogRolling-testLogRollOnPipelineRestart,,1733577944763.29aae1c60a6c9ce3ac6d0c7cf740706e.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRollOnPipelineRestart', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:39881/user/jenkins/test-data/67b70b5f-1c1f-5937-31c0-bdfce17a3981 2024-12-07T13:25:44,785 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45957 is added to blk_1073741836_1012 (size=78) 2024-12-07T13:25:44,785 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33149 is added to blk_1073741836_1012 (size=78) 2024-12-07T13:25:44,786 DEBUG 
[RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnPipelineRestart,,1733577944763.29aae1c60a6c9ce3ac6d0c7cf740706e.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T13:25:44,786 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1722): Closing 29aae1c60a6c9ce3ac6d0c7cf740706e, disabling compactions & flushes 2024-12-07T13:25:44,786 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnPipelineRestart,,1733577944763.29aae1c60a6c9ce3ac6d0c7cf740706e. 2024-12-07T13:25:44,786 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnPipelineRestart,,1733577944763.29aae1c60a6c9ce3ac6d0c7cf740706e. 2024-12-07T13:25:44,786 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnPipelineRestart,,1733577944763.29aae1c60a6c9ce3ac6d0c7cf740706e. after waiting 0 ms 2024-12-07T13:25:44,786 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnPipelineRestart,,1733577944763.29aae1c60a6c9ce3ac6d0c7cf740706e. 2024-12-07T13:25:44,787 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnPipelineRestart,,1733577944763.29aae1c60a6c9ce3ac6d0c7cf740706e. 2024-12-07T13:25:44,787 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1676): Region close journal for 29aae1c60a6c9ce3ac6d0c7cf740706e: Waiting for close lock at 1733577944786Disabling compacts and flushes for region at 1733577944786Disabling writes for close at 1733577944786Writing region close event to WAL at 1733577944787 (+1 ms)Closed at 1733577944787 2024-12-07T13:25:44,788 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_ADD_TO_META 2024-12-07T13:25:44,789 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRollOnPipelineRestart,,1733577944763.29aae1c60a6c9ce3ac6d0c7cf740706e.","families":{"info":[{"qualifier":"regioninfo","vlen":77,"tag":[],"timestamp":"1733577944788"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733577944788"}]},"ts":"1733577944788"} 2024-12-07T13:25:44,792 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
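The entries above walk the CreateTableProcedure (pid=4) through its PRE_OPERATION, WRITE_FS_LAYOUT and ADD_TO_META states for 'TestLogRolling-testLogRollOnPipelineRestart', a table with a single 'info' family and VERSIONS => '1'. As an illustration only, a client-side call that would produce an equivalent descriptor might look like the sketch below; it is not taken from the test source, and the class name CreateTableSketch is invented here.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateTableSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection();
             Admin admin = conn.getAdmin()) {
          TableDescriptor td = TableDescriptorBuilder
              .newBuilder(TableName.valueOf("TestLogRolling-testLogRollOnPipelineRestart"))
              .setColumnFamily(ColumnFamilyDescriptorBuilder
                  .newBuilder(Bytes.toBytes("info"))
                  .setMaxVersions(1) // matches VERSIONS => '1' in the descriptor logged above
                  .build())
              .build();
          // On the master side this request surfaces as the CreateTableProcedure above.
          admin.createTable(td);
        }
      }
    }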
2024-12-07T13:25:44,793 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-07T13:25:44,793 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnPipelineRestart","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733577944793"}]},"ts":"1733577944793"} 2024-12-07T13:25:44,796 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnPipelineRestart, state=ENABLING in hbase:meta 2024-12-07T13:25:44,796 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=29aae1c60a6c9ce3ac6d0c7cf740706e, ASSIGN}] 2024-12-07T13:25:44,798 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=29aae1c60a6c9ce3ac6d0c7cf740706e, ASSIGN 2024-12-07T13:25:44,799 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=29aae1c60a6c9ce3ac6d0c7cf740706e, ASSIGN; state=OFFLINE, location=c7c455b68129,40591,1733577943625; forceNewPlan=false, retain=false 2024-12-07T13:25:44,951 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=29aae1c60a6c9ce3ac6d0c7cf740706e, regionState=OPENING, regionLocation=c7c455b68129,40591,1733577943625 2024-12-07T13:25:44,956 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-11-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=29aae1c60a6c9ce3ac6d0c7cf740706e, ASSIGN because future has completed 2024-12-07T13:25:44,957 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 29aae1c60a6c9ce3ac6d0c7cf740706e, server=c7c455b68129,40591,1733577943625}] 2024-12-07T13:25:45,116 INFO [RS_OPEN_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRollOnPipelineRestart,,1733577944763.29aae1c60a6c9ce3ac6d0c7cf740706e. 
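The entries above show the assignment half of table creation: a TransitRegionStateProcedure (pid=5) picks a target region server, an OpenRegionProcedure (pid=6) is dispatched to it, and the region server's AssignRegionHandler opens the region. From a client's point of view that whole chain is typically just waited on; a hedged sketch of such a wait is below (the class name WaitForTable and the 100 ms poll interval are invented for the example).

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class WaitForTable {
      public static void main(String[] args) throws Exception {
        TableName tn = TableName.valueOf("TestLogRolling-testLogRollOnPipelineRestart");
        try (Connection conn = ConnectionFactory.createConnection();
             Admin admin = conn.getAdmin()) {
          // Poll until every region of the table has been assigned and opened,
          // i.e. until the ASSIGN/OpenRegionProcedure chain above has finished.
          while (!admin.isTableAvailable(tn)) {
            Thread.sleep(100);
          }
        }
      }
    }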
2024-12-07T13:25:45,116 DEBUG [RS_OPEN_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 29aae1c60a6c9ce3ac6d0c7cf740706e, NAME => 'TestLogRolling-testLogRollOnPipelineRestart,,1733577944763.29aae1c60a6c9ce3ac6d0c7cf740706e.', STARTKEY => '', ENDKEY => ''} 2024-12-07T13:25:45,116 DEBUG [RS_OPEN_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRollOnPipelineRestart 29aae1c60a6c9ce3ac6d0c7cf740706e 2024-12-07T13:25:45,116 DEBUG [RS_OPEN_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnPipelineRestart,,1733577944763.29aae1c60a6c9ce3ac6d0c7cf740706e.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T13:25:45,116 DEBUG [RS_OPEN_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 29aae1c60a6c9ce3ac6d0c7cf740706e 2024-12-07T13:25:45,116 DEBUG [RS_OPEN_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 29aae1c60a6c9ce3ac6d0c7cf740706e 2024-12-07T13:25:45,118 INFO [StoreOpener-29aae1c60a6c9ce3ac6d0c7cf740706e-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 29aae1c60a6c9ce3ac6d0c7cf740706e 2024-12-07T13:25:45,119 INFO [StoreOpener-29aae1c60a6c9ce3ac6d0c7cf740706e-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 29aae1c60a6c9ce3ac6d0c7cf740706e columnFamilyName info 2024-12-07T13:25:45,119 DEBUG [StoreOpener-29aae1c60a6c9ce3ac6d0c7cf740706e-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T13:25:45,120 INFO [StoreOpener-29aae1c60a6c9ce3ac6d0c7cf740706e-1 {}] regionserver.HStore(327): Store=29aae1c60a6c9ce3ac6d0c7cf740706e/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T13:25:45,120 DEBUG [RS_OPEN_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 29aae1c60a6c9ce3ac6d0c7cf740706e 2024-12-07T13:25:45,121 DEBUG [RS_OPEN_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:39881/user/jenkins/test-data/67b70b5f-1c1f-5937-31c0-bdfce17a3981/data/default/TestLogRolling-testLogRollOnPipelineRestart/29aae1c60a6c9ce3ac6d0c7cf740706e 2024-12-07T13:25:45,122 DEBUG [RS_OPEN_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39881/user/jenkins/test-data/67b70b5f-1c1f-5937-31c0-bdfce17a3981/data/default/TestLogRolling-testLogRollOnPipelineRestart/29aae1c60a6c9ce3ac6d0c7cf740706e 2024-12-07T13:25:45,122 DEBUG [RS_OPEN_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 29aae1c60a6c9ce3ac6d0c7cf740706e 2024-12-07T13:25:45,122 DEBUG [RS_OPEN_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 29aae1c60a6c9ce3ac6d0c7cf740706e 2024-12-07T13:25:45,124 DEBUG [RS_OPEN_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 29aae1c60a6c9ce3ac6d0c7cf740706e 2024-12-07T13:25:45,126 DEBUG [RS_OPEN_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39881/user/jenkins/test-data/67b70b5f-1c1f-5937-31c0-bdfce17a3981/data/default/TestLogRolling-testLogRollOnPipelineRestart/29aae1c60a6c9ce3ac6d0c7cf740706e/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-07T13:25:45,127 INFO [RS_OPEN_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 29aae1c60a6c9ce3ac6d0c7cf740706e; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=785451, jitterRate=-0.0012486428022384644}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-07T13:25:45,127 DEBUG [RS_OPEN_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 29aae1c60a6c9ce3ac6d0c7cf740706e 2024-12-07T13:25:45,128 DEBUG [RS_OPEN_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 29aae1c60a6c9ce3ac6d0c7cf740706e: Running coprocessor pre-open hook at 1733577945116Writing region info on filesystem at 1733577945116Initializing all the Stores at 1733577945117 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733577945117Cleaning up temporary data from old regions at 1733577945122 (+5 ms)Running coprocessor post-open hooks at 1733577945127 (+5 ms)Region opened successfully at 1733577945128 (+1 ms) 2024-12-07T13:25:45,129 INFO [RS_OPEN_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRollOnPipelineRestart,,1733577944763.29aae1c60a6c9ce3ac6d0c7cf740706e., pid=6, masterSystemTime=1733577945111 2024-12-07T13:25:45,132 DEBUG [RS_OPEN_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for 
TestLogRolling-testLogRollOnPipelineRestart,,1733577944763.29aae1c60a6c9ce3ac6d0c7cf740706e. 2024-12-07T13:25:45,132 INFO [RS_OPEN_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRollOnPipelineRestart,,1733577944763.29aae1c60a6c9ce3ac6d0c7cf740706e. 2024-12-07T13:25:45,133 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=29aae1c60a6c9ce3ac6d0c7cf740706e, regionState=OPEN, openSeqNum=2, regionLocation=c7c455b68129,40591,1733577943625 2024-12-07T13:25:45,135 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-11-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 29aae1c60a6c9ce3ac6d0c7cf740706e, server=c7c455b68129,40591,1733577943625 because future has completed 2024-12-07T13:25:45,139 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-12-07T13:25:45,139 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 29aae1c60a6c9ce3ac6d0c7cf740706e, server=c7c455b68129,40591,1733577943625 in 179 msec 2024-12-07T13:25:45,141 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-12-07T13:25:45,141 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=29aae1c60a6c9ce3ac6d0c7cf740706e, ASSIGN in 343 msec 2024-12-07T13:25:45,142 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-07T13:25:45,143 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnPipelineRestart","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733577945142"}]},"ts":"1733577945142"} 2024-12-07T13:25:45,145 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnPipelineRestart, state=ENABLED in hbase:meta 2024-12-07T13:25:45,146 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_POST_OPERATION 2024-12-07T13:25:45,147 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart in 382 msec 2024-12-07T13:25:45,500 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,43699,1733577893481/c7c455b68129%2C43699%2C1733577893481.meta.1733577894498.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:25:45,500 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,42863,1733577894737/c7c455b68129%2C42863%2C1733577894737.1733577894972 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:25:46,502 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,43699,1733577893481/c7c455b68129%2C43699%2C1733577893481.meta.1733577894498.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:25:46,502 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,42863,1733577894737/c7c455b68129%2C42863%2C1733577894737.1733577894972 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:25:47,504 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,42863,1733577894737/c7c455b68129%2C42863%2C1733577894737.1733577894972 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:25:47,504 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,43699,1733577893481/c7c455b68129%2C43699%2C1733577893481.meta.1733577894498.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-07T13:25:48,427 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-12-07T13:25:48,427 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-12-07T13:25:48,429 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart 2024-12-07T13:25:48,429 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart Metrics about Tables on a single HBase RegionServer 2024-12-07T13:25:48,431 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-07T13:25:48,431 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-12-07T13:25:48,506 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,42863,1733577894737/c7c455b68129%2C42863%2C1733577894737.1733577894972 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:25:48,506 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,43699,1733577893481/c7c455b68129%2C43699%2C1733577893481.meta.1733577894498.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:25:49,507 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,42863,1733577894737/c7c455b68129%2C42863%2C1733577894737.1733577894972 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:25:49,507 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,43699,1733577893481/c7c455b68129%2C43699%2C1733577893481.meta.1733577894498.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:25:50,106 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-07T13:25:50,127 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T13:25:50,128 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T13:25:50,128 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T13:25:50,128 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T13:25:50,128 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T13:25:50,129 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T13:25:50,131 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T13:25:50,131 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T13:25:50,131 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T13:25:50,133 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T13:25:50,187 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-12-07T13:25:50,187 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRollOnPipelineRestart' 2024-12-07T13:25:50,508 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,42863,1733577894737/c7c455b68129%2C42863%2C1733577894737.1733577894972 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:25:50,509 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,43699,1733577893481/c7c455b68129%2C43699%2C1733577893481.meta.1733577894498.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:25:51,510 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,43699,1733577893481/c7c455b68129%2C43699%2C1733577893481.meta.1733577894498.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:25:51,510 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,42863,1733577894737/c7c455b68129%2C42863%2C1733577894737.1733577894972 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:25:52,511 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,42863,1733577894737/c7c455b68129%2C42863%2C1733577894737.1733577894972 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:25:52,511 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,43699,1733577893481/c7c455b68129%2C43699%2C1733577893481.meta.1733577894498.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:25:53,512 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,43699,1733577893481/c7c455b68129%2C43699%2C1733577893481.meta.1733577894498.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:25:53,512 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,42863,1733577894737/c7c455b68129%2C42863%2C1733577894737.1733577894972 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:25:54,513 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,43699,1733577893481/c7c455b68129%2C43699%2C1733577893481.meta.1733577894498.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:25:54,513 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,42863,1733577894737/c7c455b68129%2C42863%2C1733577894737.1733577894972 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:25:54,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46131 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-07T13:25:54,787 INFO [RPCClient-NioEventLoopGroup-4-15 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRollOnPipelineRestart completed 2024-12-07T13:25:54,787 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRollOnPipelineRestart,, stopping at row=TestLogRolling-testLogRollOnPipelineRestart ,, for max=2147483647 with caching=100 2024-12-07T13:25:54,795 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRollOnPipelineRestart 2024-12-07T13:25:54,795 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRollOnPipelineRestart,,1733577944763.29aae1c60a6c9ce3ac6d0c7cf740706e. 2024-12-07T13:25:54,801 DEBUG [RPCClient-NioEventLoopGroup-4-14 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRollOnPipelineRestart', row='row1002', locateType=CURRENT is [region=TestLogRolling-testLogRollOnPipelineRestart,,1733577944763.29aae1c60a6c9ce3ac6d0c7cf740706e., hostname=c7c455b68129,40591,1733577943625, seqNum=2] 2024-12-07T13:25:55,515 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,43699,1733577893481/c7c455b68129%2C43699%2C1733577893481.meta.1733577894498.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:25:55,515 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,42863,1733577894737/c7c455b68129%2C42863%2C1733577894737.1733577894972 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:25:56,516 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,42863,1733577894737/c7c455b68129%2C42863%2C1733577894737.1733577894972 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:25:56,516 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,43699,1733577893481/c7c455b68129%2C43699%2C1733577893481.meta.1733577894498.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:25:56,804 INFO [Time-limited test {}] wal.TestLogRolling(360): log.getCurrentFileName()): hdfs://localhost:39881/user/jenkins/test-data/67b70b5f-1c1f-5937-31c0-bdfce17a3981/WALs/c7c455b68129,40591,1733577943625/c7c455b68129%2C40591%2C1733577943625.1733577944324 2024-12-07T13:25:56,806 WARN [ResponseProcessor for block BP-1206754118-172.17.0.3-1733577941197:blk_1073741834_1010 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1206754118-172.17.0.3-1733577941197:blk_1073741834_1010 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T13:25:56,806 WARN [ResponseProcessor for block BP-1206754118-172.17.0.3-1733577941197:blk_1073741830_1006 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1206754118-172.17.0.3-1733577941197:blk_1073741830_1006 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 
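The once-per-second "Failed invocation" warnings above come from RecoverLeaseFSUtils probing DistributedFileSystem#isFileClosed through reflection while the underlying DFSClient has already been shut down, so every probe surfaces as an InvocationTargetException wrapping "java.io.IOException: Filesystem closed". The sketch below shows that style of reflective probe; the class name, method shape, and retry handling are assumptions for illustration, not the project's actual implementation.

  import java.lang.reflect.Method;
  import org.apache.hadoop.fs.FileSystem;
  import org.apache.hadoop.fs.Path;

  // Illustrative sketch only (names and structure are assumptions): probe
  // DistributedFileSystem#isFileClosed via reflection, which is the pattern the
  // stack traces above show inside RecoverLeaseFSUtils.isFileClosed.
  final class IsFileClosedProbe {
    private IsFileClosedProbe() {}

    static boolean probeIsFileClosed(FileSystem fs, Path wal) {
      try {
        Method isFileClosed = fs.getClass().getMethod("isFileClosed", Path.class);
        return (Boolean) isFileClosed.invoke(fs, wal);
      } catch (ReflectiveOperationException e) {
        // An InvocationTargetException wrapping "java.io.IOException: Filesystem
        // closed" (as logged above) lands here; the caller simply retries later.
        return false;
      }
    }
  }

The one-second spacing of the warning timestamps above is consistent with a retry loop around a probe of this kind.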
2024-12-07T13:25:56,806 WARN [ResponseProcessor for block BP-1206754118-172.17.0.3-1733577941197:blk_1073741833_1009 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1206754118-172.17.0.3-1733577941197:blk_1073741833_1009 java.io.IOException: Bad response ERROR for BP-1206754118-172.17.0.3-1733577941197:blk_1073741833_1009 from datanode DatanodeInfoWithStorage[127.0.0.1:33149,DS-ee0760ec-9af4-4b38-8ae1-9d9f41694176,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T13:25:56,807 WARN [DataStreamer for file /user/jenkins/test-data/67b70b5f-1c1f-5937-31c0-bdfce17a3981/WALs/c7c455b68129,40591,1733577943625/c7c455b68129%2C40591%2C1733577943625.meta.1733577944592.meta block BP-1206754118-172.17.0.3-1733577941197:blk_1073741834_1010 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1206754118-172.17.0.3-1733577941197:blk_1073741834_1010 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33149,DS-ee0760ec-9af4-4b38-8ae1-9d9f41694176,DISK], DatanodeInfoWithStorage[127.0.0.1:45957,DS-4c1e617c-9512-4fc9-9d13-fd859d33efc8,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33149,DS-ee0760ec-9af4-4b38-8ae1-9d9f41694176,DISK]) is bad. 2024-12-07T13:25:56,807 WARN [DataStreamer for file /user/jenkins/test-data/67b70b5f-1c1f-5937-31c0-bdfce17a3981/WALs/c7c455b68129,40591,1733577943625/c7c455b68129%2C40591%2C1733577943625.1733577944324 block BP-1206754118-172.17.0.3-1733577941197:blk_1073741833_1009 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1206754118-172.17.0.3-1733577941197:blk_1073741833_1009 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45957,DS-4c1e617c-9512-4fc9-9d13-fd859d33efc8,DISK], DatanodeInfoWithStorage[127.0.0.1:33149,DS-ee0760ec-9af4-4b38-8ae1-9d9f41694176,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:33149,DS-ee0760ec-9af4-4b38-8ae1-9d9f41694176,DISK]) is bad. 2024-12-07T13:25:56,807 WARN [DataStreamer for file /user/jenkins/test-data/67b70b5f-1c1f-5937-31c0-bdfce17a3981/MasterData/WALs/c7c455b68129,46131,1733577943366/c7c455b68129%2C46131%2C1733577943366.1733577943752 block BP-1206754118-172.17.0.3-1733577941197:blk_1073741830_1006 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1206754118-172.17.0.3-1733577941197:blk_1073741830_1006 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33149,DS-ee0760ec-9af4-4b38-8ae1-9d9f41694176,DISK], DatanodeInfoWithStorage[127.0.0.1:45957,DS-4c1e617c-9512-4fc9-9d13-fd859d33efc8,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33149,DS-ee0760ec-9af4-4b38-8ae1-9d9f41694176,DISK]) is bad. 2024-12-07T13:25:56,807 WARN [PacketResponder: BP-1206754118-172.17.0.3-1733577941197:blk_1073741833_1009, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:33149] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T13:25:56,808 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1215074561_22 at /127.0.0.1:45130 [Receiving block BP-1206754118-172.17.0.3-1733577941197:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:33149:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:45130 dst: /127.0.0.1:33149 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-07T13:25:56,808 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2016080570_22 at /127.0.0.1:45156 [Receiving block BP-1206754118-172.17.0.3-1733577941197:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:33149:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:45156 dst: /127.0.0.1:33149 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T13:25:56,810 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2016080570_22 at /127.0.0.1:46842 [Receiving block BP-1206754118-172.17.0.3-1733577941197:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:45957:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:46842 dst: /127.0.0.1:45957 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T13:25:56,810 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1215074561_22 at /127.0.0.1:46794 [Receiving block BP-1206754118-172.17.0.3-1733577941197:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:45957:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:46794 dst: /127.0.0.1:45957 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T13:25:56,810 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2016080570_22 at /127.0.0.1:46830 [Receiving block BP-1206754118-172.17.0.3-1733577941197:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:45957:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:46830 dst: /127.0.0.1:45957 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T13:25:56,810 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2016080570_22 at /127.0.0.1:45146 [Receiving block BP-1206754118-172.17.0.3-1733577941197:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:33149:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:45146 dst: /127.0.0.1:33149 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
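The DataStreamer and DataXceiver errors above show an HDFS write pipeline losing datanode 127.0.0.1:33149 mid-write and the client marking it bad before attempting error recovery on the pipeline. Whether the client tries to replace the failed datanode is governed by the client-side replace-datanode-on-failure settings; the snippet below is a hedged configuration sketch with example values, not the configuration this test run actually used.

  import org.apache.hadoop.conf.Configuration;

  // Example values only, not the test's actual settings: these client-side keys
  // control how the DFSClient rebuilds a write pipeline after a datanode in the
  // pipeline is marked bad, as in the DataStreamer "Error Recovery" lines above.
  final class PipelineRecoveryConf {
    static Configuration clientConf() {
      Configuration conf = new Configuration();
      conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.enable", true);
      conf.set("dfs.client.block.write.replace-datanode-on-failure.policy", "DEFAULT");
      conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.best-effort", true);
      return conf;
    }
  }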
2024-12-07T13:25:56,927 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3d22a8c2{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-07T13:25:56,928 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3418a5f1{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-07T13:25:56,929 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-07T13:25:56,929 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@16a8f4c8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-07T13:25:56,929 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@171fa0fa{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/81f6c458-8c2a-490e-e78c-b838416dbcd5/hadoop.log.dir/,STOPPED} 2024-12-07T13:25:56,932 WARN [BP-1206754118-172.17.0.3-1733577941197 heartbeating to localhost/127.0.0.1:39881 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-07T13:25:56,932 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-12-07T13:25:56,933 WARN [BP-1206754118-172.17.0.3-1733577941197 heartbeating to localhost/127.0.0.1:39881 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1206754118-172.17.0.3-1733577941197 (Datanode Uuid 77bea1c3-c274-4930-8d15-a57b4458257f) service to localhost/127.0.0.1:39881 2024-12-07T13:25:56,933 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-07T13:25:56,934 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/81f6c458-8c2a-490e-e78c-b838416dbcd5/cluster_2d1942de-40e1-2fdd-f068-731fbebee115/data/data3/current/BP-1206754118-172.17.0.3-1733577941197 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-07T13:25:56,935 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/81f6c458-8c2a-490e-e78c-b838416dbcd5/cluster_2d1942de-40e1-2fdd-f068-731fbebee115/data/data4/current/BP-1206754118-172.17.0.3-1733577941197 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-07T13:25:56,935 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-07T13:25:56,944 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-07T13:25:56,948 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-07T13:25:56,948 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-07T13:25:56,948 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-07T13:25:56,949 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-07T13:25:56,949 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@59e94e8a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/81f6c458-8c2a-490e-e78c-b838416dbcd5/hadoop.log.dir/,AVAILABLE} 2024-12-07T13:25:56,949 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@e979747{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-07T13:25:57,040 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@59462733{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/81f6c458-8c2a-490e-e78c-b838416dbcd5/java.io.tmpdir/jetty-localhost-38327-hadoop-hdfs-3_4_1-tests_jar-_-any-10551737893190622457/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-07T13:25:57,040 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3f8a0d0d{HTTP/1.1, (http/1.1)}{localhost:38327} 2024-12-07T13:25:57,040 INFO [Time-limited test {}] server.Server(415): Started @174881ms 2024-12-07T13:25:57,041 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-07T13:25:57,062 WARN [ResponseProcessor for block BP-1206754118-172.17.0.3-1733577941197:blk_1073741834_1014 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1206754118-172.17.0.3-1733577941197:blk_1073741834_1014 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T13:25:57,062 WARN [ResponseProcessor for block BP-1206754118-172.17.0.3-1733577941197:blk_1073741833_1015 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1206754118-172.17.0.3-1733577941197:blk_1073741833_1015 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T13:25:57,062 WARN [ResponseProcessor for block BP-1206754118-172.17.0.3-1733577941197:blk_1073741830_1013 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1206754118-172.17.0.3-1733577941197:blk_1073741830_1013 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T13:25:57,062 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2016080570_22 at /127.0.0.1:51694 [Receiving block BP-1206754118-172.17.0.3-1733577941197:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:45957:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:51694 dst: /127.0.0.1:45957 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-07T13:25:57,062 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2016080570_22 at /127.0.0.1:51680 [Receiving block BP-1206754118-172.17.0.3-1733577941197:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:45957:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:51680 dst: /127.0.0.1:45957 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T13:25:57,062 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1215074561_22 at /127.0.0.1:51664 [Receiving block BP-1206754118-172.17.0.3-1733577941197:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:45957:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:51664 dst: /127.0.0.1:45957 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] 
at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T13:25:57,065 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1fe90fca{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-07T13:25:57,066 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6a724f0c{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-07T13:25:57,066 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-07T13:25:57,066 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7e8ebafe{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-07T13:25:57,066 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2ab2649c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/81f6c458-8c2a-490e-e78c-b838416dbcd5/hadoop.log.dir/,STOPPED} 2024-12-07T13:25:57,067 WARN [BP-1206754118-172.17.0.3-1733577941197 heartbeating to localhost/127.0.0.1:39881 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-07T13:25:57,067 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-07T13:25:57,067 WARN [BP-1206754118-172.17.0.3-1733577941197 heartbeating to localhost/127.0.0.1:39881 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1206754118-172.17.0.3-1733577941197 (Datanode Uuid b9d6c729-cbc2-4be8-992a-a507464226a8) service to localhost/127.0.0.1:39881 2024-12-07T13:25:57,067 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-07T13:25:57,067 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/81f6c458-8c2a-490e-e78c-b838416dbcd5/cluster_2d1942de-40e1-2fdd-f068-731fbebee115/data/data1/current/BP-1206754118-172.17.0.3-1733577941197 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-07T13:25:57,068 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/81f6c458-8c2a-490e-e78c-b838416dbcd5/cluster_2d1942de-40e1-2fdd-f068-731fbebee115/data/data2/current/BP-1206754118-172.17.0.3-1733577941197 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-07T13:25:57,068 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-07T13:25:57,074 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-07T13:25:57,077 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-07T13:25:57,078 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-07T13:25:57,078 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-07T13:25:57,078 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-07T13:25:57,078 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1a40ff76{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/81f6c458-8c2a-490e-e78c-b838416dbcd5/hadoop.log.dir/,AVAILABLE} 2024-12-07T13:25:57,079 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4022a798{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-07T13:25:57,168 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5a362569{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/81f6c458-8c2a-490e-e78c-b838416dbcd5/java.io.tmpdir/jetty-localhost-39599-hadoop-hdfs-3_4_1-tests_jar-_-any-574964661134079928/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-07T13:25:57,168 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@49f94f8{HTTP/1.1, 
(http/1.1)}{localhost:39599} 2024-12-07T13:25:57,168 INFO [Time-limited test {}] server.Server(415): Started @175009ms 2024-12-07T13:25:57,169 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-07T13:25:57,517 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,42863,1733577894737/c7c455b68129%2C42863%2C1733577894737.1733577894972 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:25:57,517 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,43699,1733577893481/c7c455b68129%2C43699%2C1733577893481.meta.1733577894498.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:25:57,583 WARN [Thread-1331 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-07T13:25:57,586 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xb9602ba3d30e2ce8 with lease ID 0x779aeaa5f39a5ddf: from storage DS-ee0760ec-9af4-4b38-8ae1-9d9f41694176 node DatanodeRegistration(127.0.0.1:44959, datanodeUuid=77bea1c3-c274-4930-8d15-a57b4458257f, infoPort=41955, infoSecurePort=0, ipcPort=33065, storageInfo=lv=-57;cid=testClusterID;nsid=1619793722;c=1733577941197), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-07T13:25:57,586 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xb9602ba3d30e2ce8 with lease ID 0x779aeaa5f39a5ddf: from storage DS-23dcda92-ae07-48fa-beaf-1da0c14e884a node DatanodeRegistration(127.0.0.1:44959, datanodeUuid=77bea1c3-c274-4930-8d15-a57b4458257f, infoPort=41955, infoSecurePort=0, ipcPort=33065, storageInfo=lv=-57;cid=testClusterID;nsid=1619793722;c=1733577941197), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-07T13:25:57,665 WARN [Thread-1351 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-07T13:25:57,668 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xe07cc70e044ae6b4 with lease ID 0x779aeaa5f39a5de0: from storage DS-4c1e617c-9512-4fc9-9d13-fd859d33efc8 node DatanodeRegistration(127.0.0.1:42657, datanodeUuid=b9d6c729-cbc2-4be8-992a-a507464226a8, infoPort=42585, infoSecurePort=0, ipcPort=41245, storageInfo=lv=-57;cid=testClusterID;nsid=1619793722;c=1733577941197), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-07T13:25:57,668 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xe07cc70e044ae6b4 with lease ID 0x779aeaa5f39a5de0: from storage DS-4af01ffe-413c-4a61-b722-588b40b17a83 node DatanodeRegistration(127.0.0.1:42657, datanodeUuid=b9d6c729-cbc2-4be8-992a-a507464226a8, infoPort=42585, infoSecurePort=0, ipcPort=41245, storageInfo=lv=-57;cid=testClusterID;nsid=1619793722;c=1733577941197), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-07T13:25:58,187 INFO [Time-limited test {}] wal.TestLogRolling(372): Data Nodes restarted 2024-12-07T13:25:58,193 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1002 2024-12-07T13:25:58,195 ERROR [FSHLog-0-hdfs://localhost:39881/user/jenkins/test-data/67b70b5f-1c1f-5937-31c0-bdfce17a3981-prefix:c7c455b68129,40591,1733577943625 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45957,DS-4c1e617c-9512-4fc9-9d13-fd859d33efc8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T13:25:58,195 WARN [FSHLog-0-hdfs://localhost:39881/user/jenkins/test-data/67b70b5f-1c1f-5937-31c0-bdfce17a3981-prefix:c7c455b68129,40591,1733577943625 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45957,DS-4c1e617c-9512-4fc9-9d13-fd859d33efc8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-07T13:25:58,195 DEBUG [regionserver/c7c455b68129:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog c7c455b68129%2C40591%2C1733577943625:(num 1733577944324) roll requested 2024-12-07T13:25:58,196 INFO [regionserver/c7c455b68129:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor c7c455b68129%2C40591%2C1733577943625.1733577958196 2024-12-07T13:25:58,204 DEBUG [regionserver/c7c455b68129:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:39881/user/jenkins/test-data/67b70b5f-1c1f-5937-31c0-bdfce17a3981/WALs/c7c455b68129,40591,1733577943625/c7c455b68129%2C40591%2C1733577943625.1733577944324 newFile=hdfs://localhost:39881/user/jenkins/test-data/67b70b5f-1c1f-5937-31c0-bdfce17a3981/WALs/c7c455b68129,40591,1733577943625/c7c455b68129%2C40591%2C1733577943625.1733577958196 2024-12-07T13:25:58,205 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:25:58,205 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:25:58,205 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:25:58,205 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:25:58,205 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:25:58,206 INFO [regionserver/c7c455b68129:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/67b70b5f-1c1f-5937-31c0-bdfce17a3981/WALs/c7c455b68129,40591,1733577943625/c7c455b68129%2C40591%2C1733577943625.1733577944324 with entries=2, filesize=1.59 KB; new WAL /user/jenkins/test-data/67b70b5f-1c1f-5937-31c0-bdfce17a3981/WALs/c7c455b68129,40591,1733577943625/c7c455b68129%2C40591%2C1733577943625.1733577958196 2024-12-07T13:25:58,206 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45957,DS-4c1e617c-9512-4fc9-9d13-fd859d33efc8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T13:25:58,206 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45957,DS-4c1e617c-9512-4fc9-9d13-fd859d33efc8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-07T13:25:58,206 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:39881/user/jenkins/test-data/67b70b5f-1c1f-5937-31c0-bdfce17a3981/WALs/c7c455b68129,40591,1733577943625/c7c455b68129%2C40591%2C1733577943625.1733577944324 2024-12-07T13:25:58,206 DEBUG [regionserver/c7c455b68129:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42585:42585),(127.0.0.1/127.0.0.1:41955:41955)] 2024-12-07T13:25:58,206 WARN [IPC Server handler 3 on default port 39881 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/67b70b5f-1c1f-5937-31c0-bdfce17a3981/WALs/c7c455b68129,40591,1733577943625/c7c455b68129%2C40591%2C1733577943625.1733577944324 has not been closed. Lease recovery is in progress. RecoveryId = 1017 for block blk_1073741833_1015 2024-12-07T13:25:58,207 DEBUG [regionserver/c7c455b68129:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:39881/user/jenkins/test-data/67b70b5f-1c1f-5937-31c0-bdfce17a3981/WALs/c7c455b68129,40591,1733577943625/c7c455b68129%2C40591%2C1733577943625.1733577944324 is not closed yet, will try archiving it next time 2024-12-07T13:25:58,207 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:39881/user/jenkins/test-data/67b70b5f-1c1f-5937-31c0-bdfce17a3981/WALs/c7c455b68129,40591,1733577943625/c7c455b68129%2C40591%2C1733577943625.1733577944324 after 1ms 2024-12-07T13:25:58,518 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,42863,1733577894737/c7c455b68129%2C42863%2C1733577894737.1733577894972 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:25:58,518 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,43699,1733577893481/c7c455b68129%2C43699%2C1733577893481.meta.1733577894498.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:25:59,520 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,42863,1733577894737/c7c455b68129%2C42863%2C1733577894737.1733577894972 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:25:59,520 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,43699,1733577893481/c7c455b68129%2C43699%2C1733577893481.meta.1733577894498.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:26:00,212 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1003 2024-12-07T13:26:00,522 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,42863,1733577894737/c7c455b68129%2C42863%2C1733577894737.1733577894972 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:26:00,522 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,43699,1733577893481/c7c455b68129%2C43699%2C1733577893481.meta.1733577894498.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:26:00,589 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741833_1015: GenerationStamp not matched, existing replica is blk_1073741833_1009 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 
2024-12-07T13:26:01,523 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,43699,1733577893481/c7c455b68129%2C43699%2C1733577893481.meta.1733577894498.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:26:01,523 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,42863,1733577894737/c7c455b68129%2C42863%2C1733577894737.1733577894972 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:26:02,209 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:39881/user/jenkins/test-data/67b70b5f-1c1f-5937-31c0-bdfce17a3981/WALs/c7c455b68129,40591,1733577943625/c7c455b68129%2C40591%2C1733577943625.1733577944324 after 4002ms 2024-12-07T13:26:02,219 WARN [ResponseProcessor for block BP-1206754118-172.17.0.3-1733577941197:blk_1073741837_1016 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1206754118-172.17.0.3-1733577941197:blk_1073741837_1016 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T13:26:02,220 WARN [DataStreamer for file /user/jenkins/test-data/67b70b5f-1c1f-5937-31c0-bdfce17a3981/WALs/c7c455b68129,40591,1733577943625/c7c455b68129%2C40591%2C1733577943625.1733577958196 block BP-1206754118-172.17.0.3-1733577941197:blk_1073741837_1016 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1206754118-172.17.0.3-1733577941197:blk_1073741837_1016 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42657,DS-4c1e617c-9512-4fc9-9d13-fd859d33efc8,DISK], DatanodeInfoWithStorage[127.0.0.1:44959,DS-ee0760ec-9af4-4b38-8ae1-9d9f41694176,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:42657,DS-4c1e617c-9512-4fc9-9d13-fd859d33efc8,DISK]) is bad. 2024-12-07T13:26:02,221 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2016080570_22 at /127.0.0.1:38078 [Receiving block BP-1206754118-172.17.0.3-1733577941197:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:44959:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:38078 dst: /127.0.0.1:44959 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T13:26:02,221 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2016080570_22 at /127.0.0.1:56826 [Receiving block BP-1206754118-172.17.0.3-1733577941197:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:42657:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:56826 dst: /127.0.0.1:42657 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-07T13:26:02,262 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5a362569{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-07T13:26:02,263 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@49f94f8{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-07T13:26:02,263 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-07T13:26:02,263 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4022a798{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-07T13:26:02,263 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1a40ff76{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/81f6c458-8c2a-490e-e78c-b838416dbcd5/hadoop.log.dir/,STOPPED} 2024-12-07T13:26:02,265 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-12-07T13:26:02,265 WARN [BP-1206754118-172.17.0.3-1733577941197 heartbeating to localhost/127.0.0.1:39881 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-07T13:26:02,265 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-07T13:26:02,265 WARN [BP-1206754118-172.17.0.3-1733577941197 heartbeating to localhost/127.0.0.1:39881 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1206754118-172.17.0.3-1733577941197 (Datanode Uuid b9d6c729-cbc2-4be8-992a-a507464226a8) service to localhost/127.0.0.1:39881 2024-12-07T13:26:02,267 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/81f6c458-8c2a-490e-e78c-b838416dbcd5/cluster_2d1942de-40e1-2fdd-f068-731fbebee115/data/data1/current/BP-1206754118-172.17.0.3-1733577941197 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-07T13:26:02,267 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/81f6c458-8c2a-490e-e78c-b838416dbcd5/cluster_2d1942de-40e1-2fdd-f068-731fbebee115/data/data2/current/BP-1206754118-172.17.0.3-1733577941197 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-07T13:26:02,267 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-07T13:26:02,275 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-07T13:26:02,277 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-07T13:26:02,278 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-07T13:26:02,278 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-07T13:26:02,278 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-07T13:26:02,279 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@61aceb4d{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/81f6c458-8c2a-490e-e78c-b838416dbcd5/hadoop.log.dir/,AVAILABLE} 2024-12-07T13:26:02,279 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@21d94b42{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-07T13:26:02,368 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@424d5648{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/81f6c458-8c2a-490e-e78c-b838416dbcd5/java.io.tmpdir/jetty-localhost-37015-hadoop-hdfs-3_4_1-tests_jar-_-any-4548960188933694571/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-07T13:26:02,369 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5a0cdfff{HTTP/1.1, (http/1.1)}{localhost:37015} 2024-12-07T13:26:02,369 INFO [Time-limited test {}] server.Server(415): Started @180209ms 2024-12-07T13:26:02,370 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-07T13:26:02,389 WARN [ResponseProcessor for block BP-1206754118-172.17.0.3-1733577941197:blk_1073741837_1018 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1206754118-172.17.0.3-1733577941197:blk_1073741837_1018 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T13:26:02,390 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2016080570_22 at /127.0.0.1:51572 [Receiving block BP-1206754118-172.17.0.3-1733577941197:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:44959:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:51572 dst: /127.0.0.1:44959 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T13:26:02,393 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@59462733{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-07T13:26:02,393 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3f8a0d0d{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-07T13:26:02,394 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-07T13:26:02,394 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@e979747{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-07T13:26:02,394 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@59e94e8a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/81f6c458-8c2a-490e-e78c-b838416dbcd5/hadoop.log.dir/,STOPPED} 2024-12-07T13:26:02,395 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-07T13:26:02,395 WARN [BP-1206754118-172.17.0.3-1733577941197 heartbeating to localhost/127.0.0.1:39881 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-07T13:26:02,395 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-07T13:26:02,395 WARN [BP-1206754118-172.17.0.3-1733577941197 heartbeating to localhost/127.0.0.1:39881 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1206754118-172.17.0.3-1733577941197 (Datanode Uuid 77bea1c3-c274-4930-8d15-a57b4458257f) service to localhost/127.0.0.1:39881 2024-12-07T13:26:02,395 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/81f6c458-8c2a-490e-e78c-b838416dbcd5/cluster_2d1942de-40e1-2fdd-f068-731fbebee115/data/data3/current/BP-1206754118-172.17.0.3-1733577941197 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-07T13:26:02,395 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/81f6c458-8c2a-490e-e78c-b838416dbcd5/cluster_2d1942de-40e1-2fdd-f068-731fbebee115/data/data4/current/BP-1206754118-172.17.0.3-1733577941197 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-07T13:26:02,395 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-07T13:26:02,406 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-07T13:26:02,410 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-07T13:26:02,411 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-07T13:26:02,411 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-07T13:26:02,411 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-07T13:26:02,411 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@20da775a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/81f6c458-8c2a-490e-e78c-b838416dbcd5/hadoop.log.dir/,AVAILABLE} 2024-12-07T13:26:02,411 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5e79e191{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-07T13:26:02,498 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@75d9c3d8{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/81f6c458-8c2a-490e-e78c-b838416dbcd5/java.io.tmpdir/jetty-localhost-35601-hadoop-hdfs-3_4_1-tests_jar-_-any-7986835747122976033/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-07T13:26:02,499 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@21ffcd24{HTTP/1.1, (http/1.1)}{localhost:35601} 2024-12-07T13:26:02,499 INFO [Time-limited test {}] server.Server(415): Started @180339ms 2024-12-07T13:26:02,501 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-07T13:26:02,524 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,43699,1733577893481/c7c455b68129%2C43699%2C1733577893481.meta.1733577894498.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:26:02,524 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,42863,1733577894737/c7c455b68129%2C42863%2C1733577894737.1733577894972 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:26:02,962 WARN [Thread-1405 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-07T13:26:02,964 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xb0978d50b641b631 with lease ID 0x779aeaa5f39a5de1: from storage DS-4c1e617c-9512-4fc9-9d13-fd859d33efc8 node DatanodeRegistration(127.0.0.1:34097, datanodeUuid=b9d6c729-cbc2-4be8-992a-a507464226a8, infoPort=46579, infoSecurePort=0, ipcPort=41245, storageInfo=lv=-57;cid=testClusterID;nsid=1619793722;c=1733577941197), blocks: 7, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-07T13:26:02,965 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xb0978d50b641b631 with lease ID 0x779aeaa5f39a5de1: from storage DS-4af01ffe-413c-4a61-b722-588b40b17a83 node DatanodeRegistration(127.0.0.1:34097, datanodeUuid=b9d6c729-cbc2-4be8-992a-a507464226a8, infoPort=46579, infoSecurePort=0, ipcPort=41245, storageInfo=lv=-57;cid=testClusterID;nsid=1619793722;c=1733577941197), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-07T13:26:03,057 WARN [Thread-1425 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-07T13:26:03,060 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xa42e2ef5c2c590dc with lease ID 0x779aeaa5f39a5de2: from storage DS-ee0760ec-9af4-4b38-8ae1-9d9f41694176 node DatanodeRegistration(127.0.0.1:36645, datanodeUuid=77bea1c3-c274-4930-8d15-a57b4458257f, infoPort=33229, infoSecurePort=0, ipcPort=35469, storageInfo=lv=-57;cid=testClusterID;nsid=1619793722;c=1733577941197), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-07T13:26:03,060 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xa42e2ef5c2c590dc with lease ID 0x779aeaa5f39a5de2: from storage DS-23dcda92-ae07-48fa-beaf-1da0c14e884a node DatanodeRegistration(127.0.0.1:36645, datanodeUuid=77bea1c3-c274-4930-8d15-a57b4458257f, infoPort=33229, infoSecurePort=0, ipcPort=35469, storageInfo=lv=-57;cid=testClusterID;nsid=1619793722;c=1733577941197), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-07T13:26:03,518 INFO [Time-limited test {}] wal.TestLogRolling(389): Data Nodes restarted 2024-12-07T13:26:03,524 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1004 2024-12-07T13:26:03,525 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,43699,1733577893481/c7c455b68129%2C43699%2C1733577893481.meta.1733577894498.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:26:03,525 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,42863,1733577894737/c7c455b68129%2C42863%2C1733577894737.1733577894972 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:26:03,527 ERROR [FSHLog-0-hdfs://localhost:39881/user/jenkins/test-data/67b70b5f-1c1f-5937-31c0-bdfce17a3981-prefix:c7c455b68129,40591,1733577943625 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44959,DS-ee0760ec-9af4-4b38-8ae1-9d9f41694176,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T13:26:03,528 WARN [FSHLog-0-hdfs://localhost:39881/user/jenkins/test-data/67b70b5f-1c1f-5937-31c0-bdfce17a3981-prefix:c7c455b68129,40591,1733577943625 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44959,DS-ee0760ec-9af4-4b38-8ae1-9d9f41694176,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-07T13:26:03,528 DEBUG [regionserver/c7c455b68129:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog c7c455b68129%2C40591%2C1733577943625:(num 1733577958196) roll requested 2024-12-07T13:26:03,528 INFO [regionserver/c7c455b68129:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor c7c455b68129%2C40591%2C1733577943625.1733577963528 2024-12-07T13:26:03,533 DEBUG [regionserver/c7c455b68129:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:39881/user/jenkins/test-data/67b70b5f-1c1f-5937-31c0-bdfce17a3981/WALs/c7c455b68129,40591,1733577943625/c7c455b68129%2C40591%2C1733577943625.1733577958196 newFile=hdfs://localhost:39881/user/jenkins/test-data/67b70b5f-1c1f-5937-31c0-bdfce17a3981/WALs/c7c455b68129,40591,1733577943625/c7c455b68129%2C40591%2C1733577943625.1733577963528 2024-12-07T13:26:03,533 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:26:03,533 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:26:03,533 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:26:03,533 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:26:03,533 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:26:03,533 INFO [regionserver/c7c455b68129:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/67b70b5f-1c1f-5937-31c0-bdfce17a3981/WALs/c7c455b68129,40591,1733577943625/c7c455b68129%2C40591%2C1733577943625.1733577958196 with entries=2, filesize=2.37 KB; new WAL /user/jenkins/test-data/67b70b5f-1c1f-5937-31c0-bdfce17a3981/WALs/c7c455b68129,40591,1733577943625/c7c455b68129%2C40591%2C1733577943625.1733577963528 2024-12-07T13:26:03,534 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44959,DS-ee0760ec-9af4-4b38-8ae1-9d9f41694176,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T13:26:03,534 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44959,DS-ee0760ec-9af4-4b38-8ae1-9d9f41694176,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-07T13:26:03,534 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:39881/user/jenkins/test-data/67b70b5f-1c1f-5937-31c0-bdfce17a3981/WALs/c7c455b68129,40591,1733577943625/c7c455b68129%2C40591%2C1733577943625.1733577958196 2024-12-07T13:26:03,534 WARN [IPC Server handler 0 on default port 39881 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/67b70b5f-1c1f-5937-31c0-bdfce17a3981/WALs/c7c455b68129,40591,1733577943625/c7c455b68129%2C40591%2C1733577943625.1733577958196 has not been closed. Lease recovery is in progress. RecoveryId = 1020 for block blk_1073741837_1018 2024-12-07T13:26:03,534 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:39881/user/jenkins/test-data/67b70b5f-1c1f-5937-31c0-bdfce17a3981/WALs/c7c455b68129,40591,1733577943625/c7c455b68129%2C40591%2C1733577943625.1733577958196 after 0ms 2024-12-07T13:26:03,534 DEBUG [regionserver/c7c455b68129:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46579:46579),(127.0.0.1/127.0.0.1:33229:33229)] 2024-12-07T13:26:03,534 DEBUG [regionserver/c7c455b68129:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:39881/user/jenkins/test-data/67b70b5f-1c1f-5937-31c0-bdfce17a3981/WALs/c7c455b68129,40591,1733577943625/c7c455b68129%2C40591%2C1733577943625.1733577958196 is not closed yet, will try archiving it next time 2024-12-07T13:26:04,527 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,43699,1733577893481/c7c455b68129%2C43699%2C1733577893481.meta.1733577894498.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:26:04,527 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,42863,1733577894737/c7c455b68129%2C42863%2C1733577894737.1733577894972 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:26:05,528 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,43699,1733577893481/c7c455b68129%2C43699%2C1733577893481.meta.1733577894498.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:26:05,528 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,42863,1733577894737/c7c455b68129%2C42863%2C1733577894737.1733577894972 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:26:05,536 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor c7c455b68129%2C40591%2C1733577943625.1733577965536 2024-12-07T13:26:05,544 DEBUG [Time-limited test {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:39881/user/jenkins/test-data/67b70b5f-1c1f-5937-31c0-bdfce17a3981/WALs/c7c455b68129,40591,1733577943625/c7c455b68129%2C40591%2C1733577943625.1733577963528 newFile=hdfs://localhost:39881/user/jenkins/test-data/67b70b5f-1c1f-5937-31c0-bdfce17a3981/WALs/c7c455b68129,40591,1733577943625/c7c455b68129%2C40591%2C1733577943625.1733577965536 2024-12-07T13:26:05,544 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:26:05,544 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:26:05,545 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:26:05,545 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:26:05,545 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:26:05,545 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/67b70b5f-1c1f-5937-31c0-bdfce17a3981/WALs/c7c455b68129,40591,1733577943625/c7c455b68129%2C40591%2C1733577943625.1733577963528 with entries=1, filesize=1.23 KB; new WAL /user/jenkins/test-data/67b70b5f-1c1f-5937-31c0-bdfce17a3981/WALs/c7c455b68129,40591,1733577943625/c7c455b68129%2C40591%2C1733577943625.1733577965536 2024-12-07T13:26:05,547 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36645 is added to blk_1073741838_1019 (size=1264) 2024-12-07T13:26:05,547 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34097 is added to blk_1073741838_1019 (size=1264) 2024-12-07T13:26:05,547 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:39881/user/jenkins/test-data/67b70b5f-1c1f-5937-31c0-bdfce17a3981/WALs/c7c455b68129,40591,1733577943625/c7c455b68129%2C40591%2C1733577943625.1733577958196 is not closed yet, will try archiving it next time 2024-12-07T13:26:05,552 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46579:46579),(127.0.0.1/127.0.0.1:33229:33229)] 2024-12-07T13:26:05,552 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:39881/user/jenkins/test-data/67b70b5f-1c1f-5937-31c0-bdfce17a3981/WALs/c7c455b68129,40591,1733577943625/c7c455b68129%2C40591%2C1733577943625.1733577958196 is not closed yet, will try archiving it next time 2024-12-07T13:26:05,552 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:39881/user/jenkins/test-data/67b70b5f-1c1f-5937-31c0-bdfce17a3981/WALs/c7c455b68129,40591,1733577943625/c7c455b68129%2C40591%2C1733577943625.1733577944324 2024-12-07T13:26:05,552 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file 
hdfs://localhost:39881/user/jenkins/test-data/67b70b5f-1c1f-5937-31c0-bdfce17a3981/WALs/c7c455b68129,40591,1733577943625/c7c455b68129%2C40591%2C1733577943625.1733577944324 2024-12-07T13:26:05,552 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:39881/user/jenkins/test-data/67b70b5f-1c1f-5937-31c0-bdfce17a3981/WALs/c7c455b68129,40591,1733577943625/c7c455b68129%2C40591%2C1733577943625.1733577944324 after 0ms 2024-12-07T13:26:05,553 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/67b70b5f-1c1f-5937-31c0-bdfce17a3981/WALs/c7c455b68129,40591,1733577943625/c7c455b68129%2C40591%2C1733577943625.1733577944324 2024-12-07T13:26:05,562 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #3: [\x00/METAFAMILY:HBASE::REGION_EVENT::REGION_OPEN/1733577945128/Put/vlen=218/seqid=0] 2024-12-07T13:26:05,562 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #4: [row1002/info:/1733577954802/Put/vlen=1045/seqid=0] 2024-12-07T13:26:05,562 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/67b70b5f-1c1f-5937-31c0-bdfce17a3981/WALs/c7c455b68129,40591,1733577943625/c7c455b68129%2C40591%2C1733577943625.1733577944324 2024-12-07T13:26:05,562 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:39881/user/jenkins/test-data/67b70b5f-1c1f-5937-31c0-bdfce17a3981/WALs/c7c455b68129,40591,1733577943625/c7c455b68129%2C40591%2C1733577943625.1733577958196 2024-12-07T13:26:05,562 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:39881/user/jenkins/test-data/67b70b5f-1c1f-5937-31c0-bdfce17a3981/WALs/c7c455b68129,40591,1733577943625/c7c455b68129%2C40591%2C1733577943625.1733577958196 2024-12-07T13:26:05,563 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:39881/user/jenkins/test-data/67b70b5f-1c1f-5937-31c0-bdfce17a3981/WALs/c7c455b68129,40591,1733577943625/c7c455b68129%2C40591%2C1733577943625.1733577958196 after 1ms 2024-12-07T13:26:05,563 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/67b70b5f-1c1f-5937-31c0-bdfce17a3981/WALs/c7c455b68129,40591,1733577943625/c7c455b68129%2C40591%2C1733577943625.1733577958196 2024-12-07T13:26:05,566 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #5: [row1003/info:/1733577958195/Put/vlen=1045/seqid=0] 2024-12-07T13:26:05,566 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #6: [row1004/info:/1733577960215/Put/vlen=1045/seqid=0] 2024-12-07T13:26:05,566 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/67b70b5f-1c1f-5937-31c0-bdfce17a3981/WALs/c7c455b68129,40591,1733577943625/c7c455b68129%2C40591%2C1733577943625.1733577958196 2024-12-07T13:26:05,566 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:39881/user/jenkins/test-data/67b70b5f-1c1f-5937-31c0-bdfce17a3981/WALs/c7c455b68129,40591,1733577943625/c7c455b68129%2C40591%2C1733577943625.1733577963528 2024-12-07T13:26:05,566 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:39881/user/jenkins/test-data/67b70b5f-1c1f-5937-31c0-bdfce17a3981/WALs/c7c455b68129,40591,1733577943625/c7c455b68129%2C40591%2C1733577943625.1733577963528 2024-12-07T13:26:05,567 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on 
file=hdfs://localhost:39881/user/jenkins/test-data/67b70b5f-1c1f-5937-31c0-bdfce17a3981/WALs/c7c455b68129,40591,1733577943625/c7c455b68129%2C40591%2C1733577943625.1733577963528 after 1ms 2024-12-07T13:26:05,567 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/67b70b5f-1c1f-5937-31c0-bdfce17a3981/WALs/c7c455b68129,40591,1733577943625/c7c455b68129%2C40591%2C1733577943625.1733577963528 2024-12-07T13:26:05,571 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #7: [row1005/info:/1733577963527/Put/vlen=1045/seqid=0] 2024-12-07T13:26:05,571 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:39881/user/jenkins/test-data/67b70b5f-1c1f-5937-31c0-bdfce17a3981/WALs/c7c455b68129,40591,1733577943625/c7c455b68129%2C40591%2C1733577943625.1733577965536 2024-12-07T13:26:05,571 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:39881/user/jenkins/test-data/67b70b5f-1c1f-5937-31c0-bdfce17a3981/WALs/c7c455b68129,40591,1733577943625/c7c455b68129%2C40591%2C1733577943625.1733577965536 2024-12-07T13:26:05,571 WARN [IPC Server handler 2 on default port 39881 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/67b70b5f-1c1f-5937-31c0-bdfce17a3981/WALs/c7c455b68129,40591,1733577943625/c7c455b68129%2C40591%2C1733577943625.1733577965536 has not been closed. Lease recovery is in progress. RecoveryId = 1022 for block blk_1073741839_1021 2024-12-07T13:26:05,571 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:39881/user/jenkins/test-data/67b70b5f-1c1f-5937-31c0-bdfce17a3981/WALs/c7c455b68129,40591,1733577943625/c7c455b68129%2C40591%2C1733577943625.1733577965536 after 0ms 2024-12-07T13:26:06,071 WARN [ResponseProcessor for block BP-1206754118-172.17.0.3-1733577941197:blk_1073741839_1021 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1206754118-172.17.0.3-1733577941197:blk_1073741839_1021 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T13:26:06,071 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1215074561_22 at /127.0.0.1:58492 [Receiving block BP-1206754118-172.17.0.3-1733577941197:blk_1073741839_1021] {}] datanode.DataXceiver(331): 127.0.0.1:34097:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:58492 dst: /127.0.0.1:34097 java.io.InterruptedIOException: Interrupted while waiting for IO on channel java.nio.channels.SocketChannel[connected local=localhost/127.0.0.1:34097 remote=/127.0.0.1:58492]. Total timeout mills is 60000, 59473 millis timeout left. at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:350) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] 
at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T13:26:06,071 WARN [DataStreamer for file /user/jenkins/test-data/67b70b5f-1c1f-5937-31c0-bdfce17a3981/WALs/c7c455b68129,40591,1733577943625/c7c455b68129%2C40591%2C1733577943625.1733577965536 block BP-1206754118-172.17.0.3-1733577941197:blk_1073741839_1021 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1206754118-172.17.0.3-1733577941197:blk_1073741839_1021 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34097,DS-4c1e617c-9512-4fc9-9d13-fd859d33efc8,DISK], DatanodeInfoWithStorage[127.0.0.1:36645,DS-ee0760ec-9af4-4b38-8ae1-9d9f41694176,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34097,DS-4c1e617c-9512-4fc9-9d13-fd859d33efc8,DISK]) is bad. 2024-12-07T13:26:06,071 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1215074561_22 at /127.0.0.1:33470 [Receiving block BP-1206754118-172.17.0.3-1733577941197:blk_1073741839_1021] {}] datanode.DataXceiver(331): 127.0.0.1:36645:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:33470 dst: /127.0.0.1:36645 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T13:26:06,072 WARN [DataStreamer for file /user/jenkins/test-data/67b70b5f-1c1f-5937-31c0-bdfce17a3981/WALs/c7c455b68129,40591,1733577943625/c7c455b68129%2C40591%2C1733577943625.1733577965536 block BP-1206754118-172.17.0.3-1733577941197:blk_1073741839_1021 {}] hdfs.DataStreamer(859): DataStreamer Exception org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-1206754118-172.17.0.3-1733577941197:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy44.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T13:26:06,077 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34097 is added to blk_1073741839_1022 (size=85) 2024-12-07T13:26:06,530 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,43699,1733577893481/c7c455b68129%2C43699%2C1733577893481.meta.1733577894498.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:26:06,530 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,42863,1733577894737/c7c455b68129%2C42863%2C1733577894737.1733577894972 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:26:07,531 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,43699,1733577893481/c7c455b68129%2C43699%2C1733577893481.meta.1733577894498.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:26:07,531 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,42863,1733577894737/c7c455b68129%2C42863%2C1733577894737.1733577894972 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:26:07,537 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:39881/user/jenkins/test-data/67b70b5f-1c1f-5937-31c0-bdfce17a3981/WALs/c7c455b68129,40591,1733577943625/c7c455b68129%2C40591%2C1733577943625.1733577958196 after 4002ms 2024-12-07T13:26:08,533 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,43699,1733577893481/c7c455b68129%2C43699%2C1733577893481.meta.1733577894498.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:26:08,533 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,42863,1733577894737/c7c455b68129%2C42863%2C1733577894737.1733577894972 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:26:08,967 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741837_1018: GenerationStamp not matched, existing replica is blk_1073741837_1016 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-12-07T13:26:09,535 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,43699,1733577893481/c7c455b68129%2C43699%2C1733577893481.meta.1733577894498.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:26:09,535 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,42863,1733577894737/c7c455b68129%2C42863%2C1733577894737.1733577894972 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-07T13:26:09,573 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:39881/user/jenkins/test-data/67b70b5f-1c1f-5937-31c0-bdfce17a3981/WALs/c7c455b68129,40591,1733577943625/c7c455b68129%2C40591%2C1733577943625.1733577965536 after 4002ms 2024-12-07T13:26:09,573 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/67b70b5f-1c1f-5937-31c0-bdfce17a3981/WALs/c7c455b68129,40591,1733577943625/c7c455b68129%2C40591%2C1733577943625.1733577965536 2024-12-07T13:26:09,583 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/67b70b5f-1c1f-5937-31c0-bdfce17a3981/WALs/c7c455b68129,40591,1733577943625/c7c455b68129%2C40591%2C1733577943625.1733577965536 2024-12-07T13:26:09,583 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.74 KB heapSize=3.77 KB 2024-12-07T13:26:09,584 ERROR [FSHLog-0-hdfs://localhost:39881/user/jenkins/test-data/67b70b5f-1c1f-5937-31c0-bdfce17a3981-prefix:c7c455b68129,40591,1733577943625.meta {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45957,DS-4c1e617c-9512-4fc9-9d13-fd859d33efc8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T13:26:09,584 WARN [FSHLog-0-hdfs://localhost:39881/user/jenkins/test-data/67b70b5f-1c1f-5937-31c0-bdfce17a3981-prefix:c7c455b68129,40591,1733577943625.meta {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45957,DS-4c1e617c-9512-4fc9-9d13-fd859d33efc8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-07T13:26:09,584 DEBUG [regionserver/c7c455b68129:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog c7c455b68129%2C40591%2C1733577943625.meta:.meta(num 1733577944592) roll requested 2024-12-07T13:26:09,584 INFO [regionserver/c7c455b68129:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor c7c455b68129%2C40591%2C1733577943625.meta.1733577969584.meta 2024-12-07T13:26:09,590 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:26:09,591 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:26:09,591 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:26:09,591 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:26:09,591 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:26:09,591 INFO [regionserver/c7c455b68129:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/67b70b5f-1c1f-5937-31c0-bdfce17a3981/WALs/c7c455b68129,40591,1733577943625/c7c455b68129%2C40591%2C1733577943625.meta.1733577944592.meta with entries=8, filesize=2.36 KB; new WAL /user/jenkins/test-data/67b70b5f-1c1f-5937-31c0-bdfce17a3981/WALs/c7c455b68129,40591,1733577943625/c7c455b68129%2C40591%2C1733577943625.meta.1733577969584.meta 2024-12-07T13:26:09,592 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45957,DS-4c1e617c-9512-4fc9-9d13-fd859d33efc8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T13:26:09,592 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45957,DS-4c1e617c-9512-4fc9-9d13-fd859d33efc8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-07T13:26:09,592 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:39881/user/jenkins/test-data/67b70b5f-1c1f-5937-31c0-bdfce17a3981/WALs/c7c455b68129,40591,1733577943625/c7c455b68129%2C40591%2C1733577943625.meta.1733577944592.meta 2024-12-07T13:26:09,592 DEBUG [regionserver/c7c455b68129:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33229:33229),(127.0.0.1/127.0.0.1:46579:46579)] 2024-12-07T13:26:09,592 DEBUG [regionserver/c7c455b68129:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:39881/user/jenkins/test-data/67b70b5f-1c1f-5937-31c0-bdfce17a3981/WALs/c7c455b68129,40591,1733577943625/c7c455b68129%2C40591%2C1733577943625.meta.1733577944592.meta is not closed yet, will try archiving it next time 2024-12-07T13:26:09,592 WARN [IPC Server handler 0 on default port 39881 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/67b70b5f-1c1f-5937-31c0-bdfce17a3981/WALs/c7c455b68129,40591,1733577943625/c7c455b68129%2C40591%2C1733577943625.meta.1733577944592.meta has not been closed. Lease recovery is in progress. RecoveryId = 1024 for block blk_1073741834_1014 2024-12-07T13:26:09,593 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:39881/user/jenkins/test-data/67b70b5f-1c1f-5937-31c0-bdfce17a3981/WALs/c7c455b68129,40591,1733577943625/c7c455b68129%2C40591%2C1733577943625.meta.1733577944592.meta after 1ms 2024-12-07T13:26:09,610 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39881/user/jenkins/test-data/67b70b5f-1c1f-5937-31c0-bdfce17a3981/data/hbase/meta/1588230740/.tmp/info/f6dfac49f12945ebb1acad4c0ebcdb27 is 207, key is TestLogRolling-testLogRollOnPipelineRestart,,1733577944763.29aae1c60a6c9ce3ac6d0c7cf740706e./info:regioninfo/1733577945133/Put/seqid=0 2024-12-07T13:26:09,614 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36645 is added to blk_1073741841_1025 (size=7125) 2024-12-07T13:26:09,614 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34097 is added to blk_1073741841_1025 (size=7125) 2024-12-07T13:26:09,614 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.52 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:39881/user/jenkins/test-data/67b70b5f-1c1f-5937-31c0-bdfce17a3981/data/hbase/meta/1588230740/.tmp/info/f6dfac49f12945ebb1acad4c0ebcdb27 2024-12-07T13:26:09,633 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39881/user/jenkins/test-data/67b70b5f-1c1f-5937-31c0-bdfce17a3981/data/hbase/meta/1588230740/.tmp/ns/e33223c1f93a43488d0c1938dfa2ed72 is 43, key is default/ns:d/1733577944727/Put/seqid=0 2024-12-07T13:26:09,637 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34097 is added to blk_1073741842_1026 (size=5153) 2024-12-07T13:26:09,638 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36645 is added to blk_1073741842_1026 (size=5153) 2024-12-07T13:26:09,638 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), 
to=hdfs://localhost:39881/user/jenkins/test-data/67b70b5f-1c1f-5937-31c0-bdfce17a3981/data/hbase/meta/1588230740/.tmp/ns/e33223c1f93a43488d0c1938dfa2ed72 2024-12-07T13:26:09,656 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39881/user/jenkins/test-data/67b70b5f-1c1f-5937-31c0-bdfce17a3981/data/hbase/meta/1588230740/.tmp/table/1450d75ce6fc4b73b3190b65e855dd8d is 79, key is TestLogRolling-testLogRollOnPipelineRestart/table:state/1733577945142/Put/seqid=0 2024-12-07T13:26:09,660 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36645 is added to blk_1073741843_1027 (size=5438) 2024-12-07T13:26:09,660 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34097 is added to blk_1073741843_1027 (size=5438) 2024-12-07T13:26:09,661 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=150 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:39881/user/jenkins/test-data/67b70b5f-1c1f-5937-31c0-bdfce17a3981/data/hbase/meta/1588230740/.tmp/table/1450d75ce6fc4b73b3190b65e855dd8d 2024-12-07T13:26:09,665 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39881/user/jenkins/test-data/67b70b5f-1c1f-5937-31c0-bdfce17a3981/data/hbase/meta/1588230740/.tmp/info/f6dfac49f12945ebb1acad4c0ebcdb27 as hdfs://localhost:39881/user/jenkins/test-data/67b70b5f-1c1f-5937-31c0-bdfce17a3981/data/hbase/meta/1588230740/info/f6dfac49f12945ebb1acad4c0ebcdb27 2024-12-07T13:26:09,670 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39881/user/jenkins/test-data/67b70b5f-1c1f-5937-31c0-bdfce17a3981/data/hbase/meta/1588230740/info/f6dfac49f12945ebb1acad4c0ebcdb27, entries=10, sequenceid=11, filesize=7.0 K 2024-12-07T13:26:09,671 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39881/user/jenkins/test-data/67b70b5f-1c1f-5937-31c0-bdfce17a3981/data/hbase/meta/1588230740/.tmp/ns/e33223c1f93a43488d0c1938dfa2ed72 as hdfs://localhost:39881/user/jenkins/test-data/67b70b5f-1c1f-5937-31c0-bdfce17a3981/data/hbase/meta/1588230740/ns/e33223c1f93a43488d0c1938dfa2ed72 2024-12-07T13:26:09,677 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39881/user/jenkins/test-data/67b70b5f-1c1f-5937-31c0-bdfce17a3981/data/hbase/meta/1588230740/ns/e33223c1f93a43488d0c1938dfa2ed72, entries=2, sequenceid=11, filesize=5.0 K 2024-12-07T13:26:09,678 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39881/user/jenkins/test-data/67b70b5f-1c1f-5937-31c0-bdfce17a3981/data/hbase/meta/1588230740/.tmp/table/1450d75ce6fc4b73b3190b65e855dd8d as hdfs://localhost:39881/user/jenkins/test-data/67b70b5f-1c1f-5937-31c0-bdfce17a3981/data/hbase/meta/1588230740/table/1450d75ce6fc4b73b3190b65e855dd8d 2024-12-07T13:26:09,683 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39881/user/jenkins/test-data/67b70b5f-1c1f-5937-31c0-bdfce17a3981/data/hbase/meta/1588230740/table/1450d75ce6fc4b73b3190b65e855dd8d, entries=2, sequenceid=11, filesize=5.3 K 2024-12-07T13:26:09,684 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~1.74 KB/1782, heapSize ~3.48 KB/3560, currentSize=0 B/0 for 1588230740 in 101ms, sequenceid=11, compaction requested=false 2024-12-07T13:26:09,684 DEBUG [Time-limited test {}] 
regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-12-07T13:26:09,685 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 29aae1c60a6c9ce3ac6d0c7cf740706e 1/1 column families, dataSize=4.20 KB heapSize=4.75 KB 2024-12-07T13:26:09,685 ERROR [FSHLog-0-hdfs://localhost:39881/user/jenkins/test-data/67b70b5f-1c1f-5937-31c0-bdfce17a3981-prefix:c7c455b68129,40591,1733577943625 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-1206754118-172.17.0.3-1733577941197:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy44.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T13:26:09,685 WARN [FSHLog-0-hdfs://localhost:39881/user/jenkins/test-data/67b70b5f-1c1f-5937-31c0-bdfce17a3981-prefix:c7c455b68129,40591,1733577943625 {}] wal.AbstractFSWAL(2174): append entry failed org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-1206754118-172.17.0.3-1733577941197:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy44.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-07T13:26:09,685 DEBUG [regionserver/c7c455b68129:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog c7c455b68129%2C40591%2C1733577943625:(num 1733577965536) roll requested 2024-12-07T13:26:09,685 INFO [regionserver/c7c455b68129:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor c7c455b68129%2C40591%2C1733577943625.1733577969685 2024-12-07T13:26:09,691 DEBUG [regionserver/c7c455b68129:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:39881/user/jenkins/test-data/67b70b5f-1c1f-5937-31c0-bdfce17a3981/WALs/c7c455b68129,40591,1733577943625/c7c455b68129%2C40591%2C1733577943625.1733577965536 newFile=hdfs://localhost:39881/user/jenkins/test-data/67b70b5f-1c1f-5937-31c0-bdfce17a3981/WALs/c7c455b68129,40591,1733577943625/c7c455b68129%2C40591%2C1733577943625.1733577969685 2024-12-07T13:26:09,691 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:26:09,691 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:26:09,691 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:26:09,692 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:26:09,692 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:26:09,692 INFO [regionserver/c7c455b68129:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/67b70b5f-1c1f-5937-31c0-bdfce17a3981/WALs/c7c455b68129,40591,1733577943625/c7c455b68129%2C40591%2C1733577943625.1733577965536 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/67b70b5f-1c1f-5937-31c0-bdfce17a3981/WALs/c7c455b68129,40591,1733577943625/c7c455b68129%2C40591%2C1733577943625.1733577969685 2024-12-07T13:26:09,692 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-1206754118-172.17.0.3-1733577941197:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy44.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] 
at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T13:26:09,693 DEBUG [regionserver/c7c455b68129:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33229:33229),(127.0.0.1/127.0.0.1:46579:46579)] 2024-12-07T13:26:09,692 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-1206754118-172.17.0.3-1733577941197:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy44.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T13:26:09,693 DEBUG [regionserver/c7c455b68129:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:39881/user/jenkins/test-data/67b70b5f-1c1f-5937-31c0-bdfce17a3981/WALs/c7c455b68129,40591,1733577943625/c7c455b68129%2C40591%2C1733577943625.1733577965536 is not closed yet, will try archiving it next time 2024-12-07T13:26:09,693 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:39881/user/jenkins/test-data/67b70b5f-1c1f-5937-31c0-bdfce17a3981/WALs/c7c455b68129,40591,1733577943625/c7c455b68129%2C40591%2C1733577943625.1733577965536 2024-12-07T13:26:09,693 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:39881/user/jenkins/test-data/67b70b5f-1c1f-5937-31c0-bdfce17a3981/WALs/c7c455b68129,40591,1733577943625/c7c455b68129%2C40591%2C1733577943625.1733577965536 after 0ms 2024-12-07T13:26:09,694 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:39881/user/jenkins/test-data/67b70b5f-1c1f-5937-31c0-bdfce17a3981/WALs/c7c455b68129,40591,1733577943625/c7c455b68129%2C40591%2C1733577943625.1733577965536 to hdfs://localhost:39881/user/jenkins/test-data/67b70b5f-1c1f-5937-31c0-bdfce17a3981/oldWALs/c7c455b68129%2C40591%2C1733577943625.1733577965536 2024-12-07T13:26:09,706 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39881/user/jenkins/test-data/67b70b5f-1c1f-5937-31c0-bdfce17a3981/data/default/TestLogRolling-testLogRollOnPipelineRestart/29aae1c60a6c9ce3ac6d0c7cf740706e/.tmp/info/4a93868a2d4240a2905c5f6822a701b1 is 1080, key is row1002/info:/1733577954802/Put/seqid=0 2024-12-07T13:26:09,710 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34097 is added to blk_1073741845_1029 (size=9270) 2024-12-07T13:26:09,710 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36645 is added to blk_1073741845_1029 (size=9270) 2024-12-07T13:26:09,710 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.20 KB at sequenceid=8 (bloomFilter=true), 
to=hdfs://localhost:39881/user/jenkins/test-data/67b70b5f-1c1f-5937-31c0-bdfce17a3981/data/default/TestLogRolling-testLogRollOnPipelineRestart/29aae1c60a6c9ce3ac6d0c7cf740706e/.tmp/info/4a93868a2d4240a2905c5f6822a701b1 2024-12-07T13:26:09,716 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39881/user/jenkins/test-data/67b70b5f-1c1f-5937-31c0-bdfce17a3981/data/default/TestLogRolling-testLogRollOnPipelineRestart/29aae1c60a6c9ce3ac6d0c7cf740706e/.tmp/info/4a93868a2d4240a2905c5f6822a701b1 as hdfs://localhost:39881/user/jenkins/test-data/67b70b5f-1c1f-5937-31c0-bdfce17a3981/data/default/TestLogRolling-testLogRollOnPipelineRestart/29aae1c60a6c9ce3ac6d0c7cf740706e/info/4a93868a2d4240a2905c5f6822a701b1 2024-12-07T13:26:09,721 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39881/user/jenkins/test-data/67b70b5f-1c1f-5937-31c0-bdfce17a3981/data/default/TestLogRolling-testLogRollOnPipelineRestart/29aae1c60a6c9ce3ac6d0c7cf740706e/info/4a93868a2d4240a2905c5f6822a701b1, entries=4, sequenceid=8, filesize=9.1 K 2024-12-07T13:26:09,722 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~4.20 KB/4304, heapSize ~4.73 KB/4848, currentSize=0 B/0 for 29aae1c60a6c9ce3ac6d0c7cf740706e in 38ms, sequenceid=8, compaction requested=false 2024-12-07T13:26:09,722 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 29aae1c60a6c9ce3ac6d0c7cf740706e: 2024-12-07T13:26:09,727 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-12-07T13:26:09,727 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-12-07T13:26:09,727 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) 
at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-07T13:26:09,727 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T13:26:09,727 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T13:26:09,727 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-07T13:26:09,727 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-07T13:26:09,727 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=958659110, stopped=false 2024-12-07T13:26:09,727 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=c7c455b68129,46131,1733577943366 2024-12-07T13:26:09,764 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46131-0x10000742ff90000, quorum=127.0.0.1:49551, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-07T13:26:09,764 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40591-0x10000742ff90001, quorum=127.0.0.1:49551, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-07T13:26:09,764 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46131-0x10000742ff90000, quorum=127.0.0.1:49551, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T13:26:09,764 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40591-0x10000742ff90001, quorum=127.0.0.1:49551, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T13:26:09,764 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-07T13:26:09,765 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-12-07T13:26:09,765 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-07T13:26:09,765 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T13:26:09,766 DEBUG [zk-event-processor-pool-0 {}] 
zookeeper.ZKUtil(113): regionserver:40591-0x10000742ff90001, quorum=127.0.0.1:49551, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-07T13:26:09,766 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:46131-0x10000742ff90000, quorum=127.0.0.1:49551, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-07T13:26:09,766 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'c7c455b68129,40591,1733577943625' ***** 2024-12-07T13:26:09,766 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-07T13:26:09,766 INFO [RS:0;c7c455b68129:40591 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-07T13:26:09,767 INFO [RS:0;c7c455b68129:40591 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-07T13:26:09,767 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-07T13:26:09,767 INFO [RS:0;c7c455b68129:40591 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-07T13:26:09,767 INFO [RS:0;c7c455b68129:40591 {}] regionserver.HRegionServer(3091): Received CLOSE for 29aae1c60a6c9ce3ac6d0c7cf740706e 2024-12-07T13:26:09,767 INFO [RS:0;c7c455b68129:40591 {}] regionserver.HRegionServer(959): stopping server c7c455b68129,40591,1733577943625 2024-12-07T13:26:09,767 INFO [RS:0;c7c455b68129:40591 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-07T13:26:09,767 INFO [RS:0;c7c455b68129:40591 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;c7c455b68129:40591. 2024-12-07T13:26:09,767 DEBUG [RS:0;c7c455b68129:40591 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-07T13:26:09,767 DEBUG [RS_CLOSE_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 29aae1c60a6c9ce3ac6d0c7cf740706e, disabling compactions & flushes 2024-12-07T13:26:09,768 DEBUG [RS:0;c7c455b68129:40591 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T13:26:09,768 INFO [RS_CLOSE_REGION-regionserver/c7c455b68129:0-0 
{event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnPipelineRestart,,1733577944763.29aae1c60a6c9ce3ac6d0c7cf740706e. 2024-12-07T13:26:09,768 DEBUG [RS_CLOSE_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnPipelineRestart,,1733577944763.29aae1c60a6c9ce3ac6d0c7cf740706e. 2024-12-07T13:26:09,768 INFO [RS:0;c7c455b68129:40591 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-07T13:26:09,768 DEBUG [RS_CLOSE_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnPipelineRestart,,1733577944763.29aae1c60a6c9ce3ac6d0c7cf740706e. after waiting 0 ms 2024-12-07T13:26:09,768 INFO [RS:0;c7c455b68129:40591 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-07T13:26:09,768 DEBUG [RS_CLOSE_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnPipelineRestart,,1733577944763.29aae1c60a6c9ce3ac6d0c7cf740706e. 2024-12-07T13:26:09,768 INFO [RS:0;c7c455b68129:40591 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-07T13:26:09,768 INFO [RS:0;c7c455b68129:40591 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-07T13:26:09,768 INFO [RS:0;c7c455b68129:40591 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-12-07T13:26:09,768 DEBUG [RS:0;c7c455b68129:40591 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, 29aae1c60a6c9ce3ac6d0c7cf740706e=TestLogRolling-testLogRollOnPipelineRestart,,1733577944763.29aae1c60a6c9ce3ac6d0c7cf740706e.} 2024-12-07T13:26:09,768 DEBUG [RS_CLOSE_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-07T13:26:09,768 DEBUG [RS:0;c7c455b68129:40591 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 29aae1c60a6c9ce3ac6d0c7cf740706e 2024-12-07T13:26:09,769 INFO [RS_CLOSE_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-07T13:26:09,769 DEBUG [RS_CLOSE_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-07T13:26:09,769 DEBUG [RS_CLOSE_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-07T13:26:09,769 DEBUG [RS_CLOSE_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-07T13:26:09,774 DEBUG [RS_CLOSE_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39881/user/jenkins/test-data/67b70b5f-1c1f-5937-31c0-bdfce17a3981/data/default/TestLogRolling-testLogRollOnPipelineRestart/29aae1c60a6c9ce3ac6d0c7cf740706e/recovered.edits/11.seqid, newMaxSeqId=11, maxSeqId=1 2024-12-07T13:26:09,775 INFO [RS_CLOSE_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed 
TestLogRolling-testLogRollOnPipelineRestart,,1733577944763.29aae1c60a6c9ce3ac6d0c7cf740706e. 2024-12-07T13:26:09,775 DEBUG [RS_CLOSE_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39881/user/jenkins/test-data/67b70b5f-1c1f-5937-31c0-bdfce17a3981/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-12-07T13:26:09,775 DEBUG [RS_CLOSE_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 29aae1c60a6c9ce3ac6d0c7cf740706e: Waiting for close lock at 1733577969767Running coprocessor pre-close hooks at 1733577969767Disabling compacts and flushes for region at 1733577969767Disabling writes for close at 1733577969768 (+1 ms)Writing region close event to WAL at 1733577969769 (+1 ms)Running coprocessor post-close hooks at 1733577969774 (+5 ms)Closed at 1733577969774 2024-12-07T13:26:09,775 DEBUG [RS_CLOSE_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRollOnPipelineRestart,,1733577944763.29aae1c60a6c9ce3ac6d0c7cf740706e. 2024-12-07T13:26:09,775 DEBUG [RS_CLOSE_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-07T13:26:09,775 INFO [RS_CLOSE_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-07T13:26:09,775 DEBUG [RS_CLOSE_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733577969768Running coprocessor pre-close hooks at 1733577969768Disabling compacts and flushes for region at 1733577969768Disabling writes for close at 1733577969769 (+1 ms)Writing region close event to WAL at 1733577969771 (+2 ms)Running coprocessor post-close hooks at 1733577969775 (+4 ms)Closed at 1733577969775 2024-12-07T13:26:09,776 DEBUG [RS_CLOSE_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-07T13:26:09,969 INFO [RS:0;c7c455b68129:40591 {}] regionserver.HRegionServer(976): stopping server c7c455b68129,40591,1733577943625; all regions closed. 
2024-12-07T13:26:09,970 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:26:09,970 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:26:09,971 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:26:09,971 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:26:09,971 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:26:09,974 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34097 is added to blk_1073741840_1023 (size=825) 2024-12-07T13:26:09,974 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36645 is added to blk_1073741840_1023 (size=825) 2024-12-07T13:26:10,193 INFO [regionserver/c7c455b68129:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-12-07T13:26:10,194 INFO [regionserver/c7c455b68129:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-12-07T13:26:10,194 INFO [regionserver/c7c455b68129:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-07T13:26:10,536 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,43699,1733577893481/c7c455b68129%2C43699%2C1733577893481.meta.1733577894498.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-07T13:26:10,536 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,42863,1733577894737/c7c455b68129%2C42863%2C1733577894737.1733577894972 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:26:11,538 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,42863,1733577894737/c7c455b68129%2C42863%2C1733577894737.1733577894972 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:26:11,538 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,43699,1733577893481/c7c455b68129%2C43699%2C1733577893481.meta.1733577894498.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-07T13:26:12,061 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741834_1014: GenerationStamp not matched, existing replica is blk_1073741834_1010 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-12-07T13:26:12,539 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,42863,1733577894737/c7c455b68129%2C42863%2C1733577894737.1733577894972 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:26:12,539 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,43699,1733577893481/c7c455b68129%2C43699%2C1733577893481.meta.1733577894498.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:26:13,343 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-07T13:26:13,541 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,43699,1733577893481/c7c455b68129%2C43699%2C1733577893481.meta.1733577894498.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:26:13,541 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,42863,1733577894737/c7c455b68129%2C42863%2C1733577894737.1733577894972 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:26:13,593 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:39881/user/jenkins/test-data/67b70b5f-1c1f-5937-31c0-bdfce17a3981/WALs/c7c455b68129,40591,1733577943625/c7c455b68129%2C40591%2C1733577943625.meta.1733577944592.meta after 4001ms 2024-12-07T13:26:13,594 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:39881/user/jenkins/test-data/67b70b5f-1c1f-5937-31c0-bdfce17a3981/WALs/c7c455b68129,40591,1733577943625/c7c455b68129%2C40591%2C1733577943625.meta.1733577944592.meta to hdfs://localhost:39881/user/jenkins/test-data/67b70b5f-1c1f-5937-31c0-bdfce17a3981/oldWALs/c7c455b68129%2C40591%2C1733577943625.meta.1733577944592.meta 2024-12-07T13:26:13,596 DEBUG [RS:0;c7c455b68129:40591 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/67b70b5f-1c1f-5937-31c0-bdfce17a3981/oldWALs 2024-12-07T13:26:13,597 INFO [RS:0;c7c455b68129:40591 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog c7c455b68129%2C40591%2C1733577943625.meta:.meta(num 1733577969584) 2024-12-07T13:26:13,597 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:26:13,597 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:26:13,597 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:26:13,597 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:26:13,597 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:26:13,599 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34097 is added to blk_1073741844_1028 (size=1162) 2024-12-07T13:26:13,599 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36645 is added to blk_1073741844_1028 (size=1162) 2024-12-07T13:26:13,603 DEBUG [RS:0;c7c455b68129:40591 {}] wal.AbstractFSWAL(1256): Moved 4 WAL file(s) to /user/jenkins/test-data/67b70b5f-1c1f-5937-31c0-bdfce17a3981/oldWALs 2024-12-07T13:26:13,603 INFO [RS:0;c7c455b68129:40591 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog c7c455b68129%2C40591%2C1733577943625:(num 1733577969685) 2024-12-07T13:26:13,603 DEBUG [RS:0;c7c455b68129:40591 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T13:26:13,603 INFO [RS:0;c7c455b68129:40591 {}] regionserver.LeaseManager(133): Closed leases 2024-12-07T13:26:13,604 INFO [RS:0;c7c455b68129:40591 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-07T13:26:13,604 INFO [RS:0;c7c455b68129:40591 {}] hbase.ChoreService(370): Chore service for: regionserver/c7c455b68129:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-07T13:26:13,604 INFO [RS:0;c7c455b68129:40591 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-07T13:26:13,604 INFO 
[regionserver/c7c455b68129:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-07T13:26:13,604 INFO [RS:0;c7c455b68129:40591 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:40591 2024-12-07T13:26:13,616 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46131-0x10000742ff90000, quorum=127.0.0.1:49551, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-07T13:26:13,616 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40591-0x10000742ff90001, quorum=127.0.0.1:49551, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/c7c455b68129,40591,1733577943625 2024-12-07T13:26:13,616 INFO [RS:0;c7c455b68129:40591 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-07T13:26:13,627 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [c7c455b68129,40591,1733577943625] 2024-12-07T13:26:13,637 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/c7c455b68129,40591,1733577943625 already deleted, retry=false 2024-12-07T13:26:13,638 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; c7c455b68129,40591,1733577943625 expired; onlineServers=0 2024-12-07T13:26:13,638 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'c7c455b68129,46131,1733577943366' ***** 2024-12-07T13:26:13,638 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-07T13:26:13,638 INFO [M:0;c7c455b68129:46131 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-07T13:26:13,638 INFO [M:0;c7c455b68129:46131 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-07T13:26:13,638 DEBUG [M:0;c7c455b68129:46131 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-07T13:26:13,638 DEBUG [M:0;c7c455b68129:46131 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-07T13:26:13,638 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-12-07T13:26:13,638 DEBUG [master/c7c455b68129:0:becomeActiveMaster-HFileCleaner.large.0-1733577943965 {}] cleaner.HFileCleaner(306): Exit Thread[master/c7c455b68129:0:becomeActiveMaster-HFileCleaner.large.0-1733577943965,5,FailOnTimeoutGroup] 2024-12-07T13:26:13,638 DEBUG [master/c7c455b68129:0:becomeActiveMaster-HFileCleaner.small.0-1733577943965 {}] cleaner.HFileCleaner(306): Exit Thread[master/c7c455b68129:0:becomeActiveMaster-HFileCleaner.small.0-1733577943965,5,FailOnTimeoutGroup] 2024-12-07T13:26:13,638 INFO [M:0;c7c455b68129:46131 {}] hbase.ChoreService(370): Chore service for: master/c7c455b68129:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-12-07T13:26:13,638 INFO [M:0;c7c455b68129:46131 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-07T13:26:13,638 DEBUG [M:0;c7c455b68129:46131 {}] master.HMaster(1795): Stopping service threads 2024-12-07T13:26:13,638 INFO [M:0;c7c455b68129:46131 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-07T13:26:13,638 INFO [M:0;c7c455b68129:46131 {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-07T13:26:13,639 INFO [M:0;c7c455b68129:46131 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-07T13:26:13,639 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-07T13:26:13,648 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46131-0x10000742ff90000, quorum=127.0.0.1:49551, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-07T13:26:13,648 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46131-0x10000742ff90000, quorum=127.0.0.1:49551, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T13:26:13,648 DEBUG [M:0;c7c455b68129:46131 {}] zookeeper.ZKUtil(347): master:46131-0x10000742ff90000, quorum=127.0.0.1:49551, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-07T13:26:13,648 WARN [M:0;c7c455b68129:46131 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-07T13:26:13,649 INFO [M:0;c7c455b68129:46131 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:39881/user/jenkins/test-data/67b70b5f-1c1f-5937-31c0-bdfce17a3981/.lastflushedseqids 2024-12-07T13:26:13,657 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34097 is added to blk_1073741846_1030 (size=120) 2024-12-07T13:26:13,657 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36645 is added to blk_1073741846_1030 (size=120) 2024-12-07T13:26:13,657 INFO [M:0;c7c455b68129:46131 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-12-07T13:26:13,660 INFO [M:0;c7c455b68129:46131 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-07T13:26:13,660 DEBUG [M:0;c7c455b68129:46131 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-07T13:26:13,660 INFO [M:0;c7c455b68129:46131 {}] 
regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-07T13:26:13,660 DEBUG [M:0;c7c455b68129:46131 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-07T13:26:13,660 DEBUG [M:0;c7c455b68129:46131 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-07T13:26:13,660 DEBUG [M:0;c7c455b68129:46131 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-07T13:26:13,660 INFO [M:0;c7c455b68129:46131 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.17 KB heapSize=29.16 KB 2024-12-07T13:26:13,661 ERROR [FSHLog-0-hdfs://localhost:39881/user/jenkins/test-data/67b70b5f-1c1f-5937-31c0-bdfce17a3981/MasterData-prefix:c7c455b68129,46131,1733577943366 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45957,DS-4c1e617c-9512-4fc9-9d13-fd859d33efc8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-07T13:26:13,661 WARN [FSHLog-0-hdfs://localhost:39881/user/jenkins/test-data/67b70b5f-1c1f-5937-31c0-bdfce17a3981/MasterData-prefix:c7c455b68129,46131,1733577943366 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45957,DS-4c1e617c-9512-4fc9-9d13-fd859d33efc8,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-07T13:26:13,661 DEBUG [master:store-WAL-Roller {}] wal.AbstractWALRoller(198): WAL FSHLog c7c455b68129%2C46131%2C1733577943366:(num 1733577943752) roll requested
2024-12-07T13:26:13,661 INFO [master:store-WAL-Roller {}] monitor.StreamSlowMonitor(122): New stream slow monitor c7c455b68129%2C46131%2C1733577943366.1733577973661
2024-12-07T13:26:13,667 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-07T13:26:13,667 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-07T13:26:13,668 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-07T13:26:13,668 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-07T13:26:13,668 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-07T13:26:13,668 INFO [master:store-WAL-Roller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/67b70b5f-1c1f-5937-31c0-bdfce17a3981/MasterData/WALs/c7c455b68129,46131,1733577943366/c7c455b68129%2C46131%2C1733577943366.1733577943752 with entries=53, filesize=26.62 KB; new WAL /user/jenkins/test-data/67b70b5f-1c1f-5937-31c0-bdfce17a3981/MasterData/WALs/c7c455b68129,46131,1733577943366/c7c455b68129%2C46131%2C1733577943366.1733577973661
2024-12-07T13:26:13,668 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing...
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45957,DS-4c1e617c-9512-4fc9-9d13-fd859d33efc8,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-07T13:26:13,668 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed.
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45957,DS-4c1e617c-9512-4fc9-9d13-fd859d33efc8,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-07T13:26:13,668 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:39881/user/jenkins/test-data/67b70b5f-1c1f-5937-31c0-bdfce17a3981/MasterData/WALs/c7c455b68129,46131,1733577943366/c7c455b68129%2C46131%2C1733577943366.1733577943752
2024-12-07T13:26:13,669 WARN [IPC Server handler 0 on default port 39881 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/67b70b5f-1c1f-5937-31c0-bdfce17a3981/MasterData/WALs/c7c455b68129,46131,1733577943366/c7c455b68129%2C46131%2C1733577943366.1733577943752 has not been closed. Lease recovery is in progress. RecoveryId = 1032 for block blk_1073741830_1013
2024-12-07T13:26:13,669 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46579:46579),(127.0.0.1/127.0.0.1:33229:33229)]
2024-12-07T13:26:13,669 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(879): hdfs://localhost:39881/user/jenkins/test-data/67b70b5f-1c1f-5937-31c0-bdfce17a3981/MasterData/WALs/c7c455b68129,46131,1733577943366/c7c455b68129%2C46131%2C1733577943366.1733577943752 is not closed yet, will try archiving it next time
2024-12-07T13:26:13,669 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:39881/user/jenkins/test-data/67b70b5f-1c1f-5937-31c0-bdfce17a3981/MasterData/WALs/c7c455b68129,46131,1733577943366/c7c455b68129%2C46131%2C1733577943366.1733577943752 after 1ms
2024-12-07T13:26:13,684 DEBUG [M:0;c7c455b68129:46131 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39881/user/jenkins/test-data/67b70b5f-1c1f-5937-31c0-bdfce17a3981/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/89f4aa4a49b747938615dbd601cbdf86 is 82, key is hbase:meta,,1/info:regioninfo/1733577944619/Put/seqid=0
2024-12-07T13:26:13,690 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34097 is added to blk_1073741848_1033 (size=5672)
2024-12-07T13:26:13,691 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36645 is added to blk_1073741848_1033 (size=5672)
2024-12-07T13:26:13,691 INFO [M:0;c7c455b68129:46131 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:39881/user/jenkins/test-data/67b70b5f-1c1f-5937-31c0-bdfce17a3981/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/89f4aa4a49b747938615dbd601cbdf86
2024-12-07T13:26:13,708 DEBUG [M:0;c7c455b68129:46131 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39881/user/jenkins/test-data/67b70b5f-1c1f-5937-31c0-bdfce17a3981/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/ddc2e97653794420afa8fac3dcf9aaf5 is 778, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1733577945147/Put/seqid=0
2024-12-07T13:26:13,713 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34097 is added to blk_1073741849_1034 (size=6118)
2024-12-07T13:26:13,713 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36645 is added to blk_1073741849_1034 (size=6118)
2024-12-07T13:26:13,713 INFO [M:0;c7c455b68129:46131 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.57 KB at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:39881/user/jenkins/test-data/67b70b5f-1c1f-5937-31c0-bdfce17a3981/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/ddc2e97653794420afa8fac3dcf9aaf5
2024-12-07T13:26:13,727 INFO [RS:0;c7c455b68129:40591 {}] hbase.HBaseServerBase(486): Close table descriptors
2024-12-07T13:26:13,727 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40591-0x10000742ff90001, quorum=127.0.0.1:49551, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-07T13:26:13,727 INFO [RS:0;c7c455b68129:40591 {}] regionserver.HRegionServer(1031): Exiting; stopping=c7c455b68129,40591,1733577943625; zookeeper connection closed.
2024-12-07T13:26:13,727 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40591-0x10000742ff90001, quorum=127.0.0.1:49551, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-07T13:26:13,727 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@1129e39e {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@1129e39e
2024-12-07T13:26:13,728 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete
2024-12-07T13:26:13,731 DEBUG [M:0;c7c455b68129:46131 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39881/user/jenkins/test-data/67b70b5f-1c1f-5937-31c0-bdfce17a3981/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/45fcf3d6b70b4edeb4e0252fa3890468 is 69, key is c7c455b68129,40591,1733577943625/rs:state/1733577944170/Put/seqid=0
2024-12-07T13:26:13,736 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36645 is added to blk_1073741850_1035 (size=5156)
2024-12-07T13:26:13,736 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34097 is added to blk_1073741850_1035 (size=5156)
2024-12-07T13:26:13,736 INFO [M:0;c7c455b68129:46131 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:39881/user/jenkins/test-data/67b70b5f-1c1f-5937-31c0-bdfce17a3981/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/45fcf3d6b70b4edeb4e0252fa3890468
2024-12-07T13:26:13,754 DEBUG [M:0;c7c455b68129:46131 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39881/user/jenkins/test-data/67b70b5f-1c1f-5937-31c0-bdfce17a3981/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/5aae468757864bd7a4044508ae7ea230 is 52, key is load_balancer_on/state:d/1733577944759/Put/seqid=0
2024-12-07T13:26:13,758 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36645 is added to blk_1073741851_1036 (size=5056)
2024-12-07T13:26:13,759 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34097 is added to blk_1073741851_1036 (size=5056)
2024-12-07T13:26:13,759 INFO [M:0;c7c455b68129:46131 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:39881/user/jenkins/test-data/67b70b5f-1c1f-5937-31c0-bdfce17a3981/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/5aae468757864bd7a4044508ae7ea230
2024-12-07T13:26:13,764 DEBUG [M:0;c7c455b68129:46131 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39881/user/jenkins/test-data/67b70b5f-1c1f-5937-31c0-bdfce17a3981/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/89f4aa4a49b747938615dbd601cbdf86 as hdfs://localhost:39881/user/jenkins/test-data/67b70b5f-1c1f-5937-31c0-bdfce17a3981/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/89f4aa4a49b747938615dbd601cbdf86
2024-12-07T13:26:13,768 INFO [M:0;c7c455b68129:46131 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39881/user/jenkins/test-data/67b70b5f-1c1f-5937-31c0-bdfce17a3981/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/89f4aa4a49b747938615dbd601cbdf86, entries=8, sequenceid=56, filesize=5.5 K
2024-12-07T13:26:13,769 DEBUG [M:0;c7c455b68129:46131 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39881/user/jenkins/test-data/67b70b5f-1c1f-5937-31c0-bdfce17a3981/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/ddc2e97653794420afa8fac3dcf9aaf5 as hdfs://localhost:39881/user/jenkins/test-data/67b70b5f-1c1f-5937-31c0-bdfce17a3981/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/ddc2e97653794420afa8fac3dcf9aaf5
2024-12-07T13:26:13,774 INFO [M:0;c7c455b68129:46131 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39881/user/jenkins/test-data/67b70b5f-1c1f-5937-31c0-bdfce17a3981/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/ddc2e97653794420afa8fac3dcf9aaf5, entries=6, sequenceid=56, filesize=6.0 K
2024-12-07T13:26:13,775 DEBUG [M:0;c7c455b68129:46131 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39881/user/jenkins/test-data/67b70b5f-1c1f-5937-31c0-bdfce17a3981/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/45fcf3d6b70b4edeb4e0252fa3890468 as hdfs://localhost:39881/user/jenkins/test-data/67b70b5f-1c1f-5937-31c0-bdfce17a3981/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/45fcf3d6b70b4edeb4e0252fa3890468
2024-12-07T13:26:13,780 INFO [M:0;c7c455b68129:46131 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39881/user/jenkins/test-data/67b70b5f-1c1f-5937-31c0-bdfce17a3981/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/45fcf3d6b70b4edeb4e0252fa3890468, entries=1, sequenceid=56, filesize=5.0 K
2024-12-07T13:26:13,781 DEBUG [M:0;c7c455b68129:46131 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39881/user/jenkins/test-data/67b70b5f-1c1f-5937-31c0-bdfce17a3981/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/5aae468757864bd7a4044508ae7ea230 as hdfs://localhost:39881/user/jenkins/test-data/67b70b5f-1c1f-5937-31c0-bdfce17a3981/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/5aae468757864bd7a4044508ae7ea230
2024-12-07T13:26:13,786 INFO [M:0;c7c455b68129:46131 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39881/user/jenkins/test-data/67b70b5f-1c1f-5937-31c0-bdfce17a3981/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/5aae468757864bd7a4044508ae7ea230, entries=1, sequenceid=56, filesize=4.9 K
2024-12-07T13:26:13,787 INFO [M:0;c7c455b68129:46131 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.17 KB/23726, heapSize ~29.10 KB/29800, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 127ms, sequenceid=56, compaction requested=false
2024-12-07T13:26:13,788 INFO [M:0;c7c455b68129:46131 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-07T13:26:13,788 DEBUG [M:0;c7c455b68129:46131 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733577973660Disabling compacts and flushes for region at 1733577973660Disabling writes for close at 1733577973660Obtaining lock to block concurrent updates at 1733577973660Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733577973660Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23726, getHeapSize=29800, getOffHeapSize=0, getCellsCount=67 at 1733577973661 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1733577973670 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733577973670Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733577973684 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733577973684Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733577973697 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733577973708 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733577973708Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733577973718 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733577973731 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733577973731Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1733577973740 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1733577973754 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1733577973754Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2c0602bd: reopening flushed file at 1733577973763 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@60089394: reopening flushed file at 1733577973768 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1250e47f: reopening flushed file at 1733577973774 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@23f89887: reopening flushed file at 1733577973780 (+6 ms)Finished flush of dataSize ~23.17 KB/23726, heapSize ~29.10 KB/29800, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 127ms, sequenceid=56, compaction requested=false at 1733577973787 (+7 ms)Writing region close event to WAL at 1733577973788 (+1 ms)Closed at 1733577973788
2024-12-07T13:26:13,788 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-07T13:26:13,788 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-07T13:26:13,788 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-07T13:26:13,788 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-07T13:26:13,788 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-07T13:26:13,790 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36645 is added to blk_1073741847_1031 (size=757)
2024-12-07T13:26:13,790 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34097 is added to blk_1073741847_1031 (size=757)
2024-12-07T13:26:14,542 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for
hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,42863,1733577894737/c7c455b68129%2C42863%2C1733577894737.1733577894972 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:26:14,542 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,43699,1733577893481/c7c455b68129%2C43699%2C1733577893481.meta.1733577894498.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:26:14,776 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T13:26:14,777 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T13:26:14,796 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T13:26:14,796 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T13:26:14,796 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T13:26:14,796 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T13:26:14,796 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T13:26:14,797 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T13:26:14,799 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T13:26:14,799 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T13:26:14,799 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T13:26:14,800 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T13:26:14,803 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T13:26:14,803 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T13:26:15,062 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741830_1013: GenerationStamp not matched, existing replica is blk_1073741830_1006 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-12-07T13:26:15,306 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-07T13:26:15,309 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T13:26:15,310 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T13:26:15,311 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T13:26:15,311 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T13:26:15,327 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T13:26:15,327 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T13:26:15,327 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T13:26:15,327 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T13:26:15,327 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T13:26:15,328 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T13:26:15,330 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T13:26:15,330 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T13:26:15,331 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T13:26:15,332 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T13:26:15,543 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,43699,1733577893481/c7c455b68129%2C43699%2C1733577893481.meta.1733577894498.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:26:15,543 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,42863,1733577894737/c7c455b68129%2C42863%2C1733577894737.1733577894972 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:26:16,543 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,43699,1733577893481/c7c455b68129%2C43699%2C1733577893481.meta.1733577894498.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:26:16,543 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,42863,1733577894737/c7c455b68129%2C42863%2C1733577894737.1733577894972 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:26:17,544 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,43699,1733577893481/c7c455b68129%2C43699%2C1733577893481.meta.1733577894498.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:26:17,544 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,42863,1733577894737/c7c455b68129%2C42863%2C1733577894737.1733577894972 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-07T13:26:17,670 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:39881/user/jenkins/test-data/67b70b5f-1c1f-5937-31c0-bdfce17a3981/MasterData/WALs/c7c455b68129,46131,1733577943366/c7c455b68129%2C46131%2C1733577943366.1733577943752 after 4002ms
2024-12-07T13:26:17,671 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:39881/user/jenkins/test-data/67b70b5f-1c1f-5937-31c0-bdfce17a3981/MasterData/WALs/c7c455b68129,46131,1733577943366/c7c455b68129%2C46131%2C1733577943366.1733577943752 to hdfs://localhost:39881/user/jenkins/test-data/67b70b5f-1c1f-5937-31c0-bdfce17a3981/MasterData/oldWALs/c7c455b68129%2C46131%2C1733577943366.1733577943752
2024-12-07T13:26:17,675 INFO [WAL-Archive-0 {}] region.MasterRegionUtils(50): Moved hdfs://localhost:39881/user/jenkins/test-data/67b70b5f-1c1f-5937-31c0-bdfce17a3981/MasterData/oldWALs/c7c455b68129%2C46131%2C1733577943366.1733577943752 to hdfs://localhost:39881/user/jenkins/test-data/67b70b5f-1c1f-5937-31c0-bdfce17a3981/oldWALs/c7c455b68129%2C46131%2C1733577943366.1733577943752$masterlocalwal$
2024-12-07T13:26:17,676 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting.
2024-12-07T13:26:17,676 INFO [M:0;c7c455b68129:46131 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down.
2024-12-07T13:26:17,676 INFO [M:0;c7c455b68129:46131 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:46131
2024-12-07T13:26:17,676 INFO [M:0;c7c455b68129:46131 {}] hbase.HBaseServerBase(479): Close zookeeper
2024-12-07T13:26:17,790 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46131-0x10000742ff90000, quorum=127.0.0.1:49551, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-07T13:26:17,790 INFO [M:0;c7c455b68129:46131 {}] hbase.HBaseServerBase(486): Close table descriptors
2024-12-07T13:26:17,790 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46131-0x10000742ff90000, quorum=127.0.0.1:49551, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-07T13:26:17,793 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@75d9c3d8{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-07T13:26:17,793 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@21ffcd24{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-07T13:26:17,793 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-07T13:26:17,794 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5e79e191{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-07T13:26:17,794 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@20da775a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/81f6c458-8c2a-490e-e78c-b838416dbcd5/hadoop.log.dir/,STOPPED}
2024-12-07T13:26:17,795 WARN [BP-1206754118-172.17.0.3-1733577941197 heartbeating to localhost/127.0.0.1:39881 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-12-07T13:26:17,795 WARN [BP-1206754118-172.17.0.3-1733577941197 heartbeating to localhost/127.0.0.1:39881 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1206754118-172.17.0.3-1733577941197 (Datanode Uuid 77bea1c3-c274-4930-8d15-a57b4458257f) service to localhost/127.0.0.1:39881
2024-12-07T13:26:17,795 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-12-07T13:26:17,795 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-07T13:26:17,796 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/81f6c458-8c2a-490e-e78c-b838416dbcd5/cluster_2d1942de-40e1-2fdd-f068-731fbebee115/data/data3/current/BP-1206754118-172.17.0.3-1733577941197 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-07T13:26:17,796 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/81f6c458-8c2a-490e-e78c-b838416dbcd5/cluster_2d1942de-40e1-2fdd-f068-731fbebee115/data/data4/current/BP-1206754118-172.17.0.3-1733577941197 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-07T13:26:17,796 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-07T13:26:17,799 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@424d5648{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-07T13:26:17,799 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5a0cdfff{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-07T13:26:17,799 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-07T13:26:17,800 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@21d94b42{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-07T13:26:17,800 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@61aceb4d{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/81f6c458-8c2a-490e-e78c-b838416dbcd5/hadoop.log.dir/,STOPPED}
2024-12-07T13:26:17,801 WARN [BP-1206754118-172.17.0.3-1733577941197 heartbeating to localhost/127.0.0.1:39881 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-12-07T13:26:17,801 WARN [BP-1206754118-172.17.0.3-1733577941197 heartbeating to localhost/127.0.0.1:39881 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1206754118-172.17.0.3-1733577941197 (Datanode Uuid b9d6c729-cbc2-4be8-992a-a507464226a8) service to localhost/127.0.0.1:39881
2024-12-07T13:26:17,801 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-12-07T13:26:17,801 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-07T13:26:17,801 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/81f6c458-8c2a-490e-e78c-b838416dbcd5/cluster_2d1942de-40e1-2fdd-f068-731fbebee115/data/data1/current/BP-1206754118-172.17.0.3-1733577941197 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-07T13:26:17,802 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/81f6c458-8c2a-490e-e78c-b838416dbcd5/cluster_2d1942de-40e1-2fdd-f068-731fbebee115/data/data2/current/BP-1206754118-172.17.0.3-1733577941197 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-07T13:26:17,802 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-07T13:26:17,807 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@37fa3e5a{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-12-07T13:26:17,808 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@21865735{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-07T13:26:17,808 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-07T13:26:17,808 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@60017892{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-07T13:26:17,808 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@482b04db{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/81f6c458-8c2a-490e-e78c-b838416dbcd5/hadoop.log.dir/,STOPPED}
2024-12-07T13:26:17,813 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers
2024-12-07T13:26:17,831 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down
2024-12-07T13:26:17,840 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnPipelineRestart Thread=179 (was 154)
Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:39881
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
    app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-31-2
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-2
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:39881 from jenkins.hfs.4
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042)
    app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)
Potentially hanging thread: RPCClient-NioEventLoopGroup-4-15
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-30-3
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: LeaseRenewer:jenkins.hfs.4@localhost:39881
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-31-3
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: HMaster-EventLoopGroup-10-3
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: RPCClient-NioEventLoopGroup-4-13
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: RPCClient-NioEventLoopGroup-4-12
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-33-1
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-32-1
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-33-3
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:39881 from jenkins
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042)
    app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)
Potentially hanging thread: HMaster-EventLoopGroup-10-1
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-32-2
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-30-1
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-31-1
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: RPCClient-NioEventLoopGroup-4-14
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-30-2
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:39881 from jenkins
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042)
    app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)
Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-3
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-33-2
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-1
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:39881
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
    app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: HMaster-EventLoopGroup-10-2
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: LeaseRenewer:jenkins@localhost:39881
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-32-3
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:39881
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
    app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
- Thread LEAK? -, OpenFileDescriptor=457 (was 450) - OpenFileDescriptor LEAK?
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=54 (was 75), ProcessCount=11 (was 11), AvailableMemoryMB=15413 (was 15635) 2024-12-07T13:26:17,846 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testCompactionRecordDoesntBlockRolling Thread=179, OpenFileDescriptor=457, MaxFileDescriptor=1048576, SystemLoadAverage=54, ProcessCount=11, AvailableMemoryMB=15413 2024-12-07T13:26:17,847 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-07T13:26:17,847 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/81f6c458-8c2a-490e-e78c-b838416dbcd5/hadoop.log.dir so I do NOT create it in target/test-data/2112764b-01a2-c5c5-cacb-7511cece2d5d 2024-12-07T13:26:17,847 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/81f6c458-8c2a-490e-e78c-b838416dbcd5/hadoop.tmp.dir so I do NOT create it in target/test-data/2112764b-01a2-c5c5-cacb-7511cece2d5d 2024-12-07T13:26:17,847 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2112764b-01a2-c5c5-cacb-7511cece2d5d/cluster_13aa47e1-501f-0d38-6bb4-0939deb71df6, deleteOnExit=true 2024-12-07T13:26:17,847 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-12-07T13:26:17,847 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2112764b-01a2-c5c5-cacb-7511cece2d5d/test.cache.data in system properties and HBase conf 2024-12-07T13:26:17,847 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2112764b-01a2-c5c5-cacb-7511cece2d5d/hadoop.tmp.dir in system properties and HBase conf 2024-12-07T13:26:17,847 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2112764b-01a2-c5c5-cacb-7511cece2d5d/hadoop.log.dir in system properties and HBase conf 2024-12-07T13:26:17,847 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2112764b-01a2-c5c5-cacb-7511cece2d5d/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-07T13:26:17,848 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2112764b-01a2-c5c5-cacb-7511cece2d5d/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-07T13:26:17,848 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-12-07T13:26:17,848 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file 
system is not a DistributedFileSystem. Skipping on block location reordering 2024-12-07T13:26:17,848 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2112764b-01a2-c5c5-cacb-7511cece2d5d/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-07T13:26:17,848 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2112764b-01a2-c5c5-cacb-7511cece2d5d/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-07T13:26:17,848 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2112764b-01a2-c5c5-cacb-7511cece2d5d/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-07T13:26:17,848 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2112764b-01a2-c5c5-cacb-7511cece2d5d/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-07T13:26:17,848 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2112764b-01a2-c5c5-cacb-7511cece2d5d/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-07T13:26:17,848 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2112764b-01a2-c5c5-cacb-7511cece2d5d/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-07T13:26:17,848 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2112764b-01a2-c5c5-cacb-7511cece2d5d/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-07T13:26:17,848 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2112764b-01a2-c5c5-cacb-7511cece2d5d/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-07T13:26:17,849 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2112764b-01a2-c5c5-cacb-7511cece2d5d/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-07T13:26:17,849 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2112764b-01a2-c5c5-cacb-7511cece2d5d/nfs.dump.dir in system properties and HBase conf 2024-12-07T13:26:17,849 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2112764b-01a2-c5c5-cacb-7511cece2d5d/java.io.tmpdir in system properties and HBase conf 2024-12-07T13:26:17,849 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2112764b-01a2-c5c5-cacb-7511cece2d5d/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-07T13:26:17,849 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2112764b-01a2-c5c5-cacb-7511cece2d5d/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-07T13:26:17,849 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2112764b-01a2-c5c5-cacb-7511cece2d5d/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-07T13:26:17,862 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-07T13:26:18,226 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-07T13:26:18,229 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-07T13:26:18,233 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-07T13:26:18,234 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-07T13:26:18,234 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-07T13:26:18,234 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-07T13:26:18,238 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3df88721{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2112764b-01a2-c5c5-cacb-7511cece2d5d/hadoop.log.dir/,AVAILABLE} 2024-12-07T13:26:18,238 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7f08894b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-07T13:26:18,329 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@e21aaf2{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2112764b-01a2-c5c5-cacb-7511cece2d5d/java.io.tmpdir/jetty-localhost-42669-hadoop-hdfs-3_4_1-tests_jar-_-any-2856734005529759881/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-07T13:26:18,330 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6dc3ea71{HTTP/1.1, (http/1.1)}{localhost:42669} 2024-12-07T13:26:18,330 INFO [Time-limited test {}] server.Server(415): Started @196170ms 2024-12-07T13:26:18,341 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-07T13:26:18,427 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-07T13:26:18,427 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-12-07T13:26:18,427 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-12-07T13:26:18,427 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart 2024-12-07T13:26:18,545 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,43699,1733577893481/c7c455b68129%2C43699%2C1733577893481.meta.1733577894498.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:26:18,545 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,42863,1733577894737/c7c455b68129%2C42863%2C1733577894737.1733577894972 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:26:18,567 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-07T13:26:18,570 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-07T13:26:18,571 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-07T13:26:18,571 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-07T13:26:18,571 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-07T13:26:18,571 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@72ef9fa2{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2112764b-01a2-c5c5-cacb-7511cece2d5d/hadoop.log.dir/,AVAILABLE} 2024-12-07T13:26:18,571 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@438a440e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-07T13:26:18,661 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3d40a54d{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2112764b-01a2-c5c5-cacb-7511cece2d5d/java.io.tmpdir/jetty-localhost-37455-hadoop-hdfs-3_4_1-tests_jar-_-any-6270316670391214436/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-07T13:26:18,662 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@74b5ebca{HTTP/1.1, (http/1.1)}{localhost:37455} 2024-12-07T13:26:18,662 INFO [Time-limited test {}] server.Server(415): Started @196502ms 2024-12-07T13:26:18,663 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-07T13:26:18,686 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-07T13:26:18,689 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-07T13:26:18,689 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-07T13:26:18,689 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-07T13:26:18,690 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-07T13:26:18,690 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2d7944b2{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2112764b-01a2-c5c5-cacb-7511cece2d5d/hadoop.log.dir/,AVAILABLE} 2024-12-07T13:26:18,690 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@280e4ad5{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-07T13:26:18,781 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1c429e05{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2112764b-01a2-c5c5-cacb-7511cece2d5d/java.io.tmpdir/jetty-localhost-37263-hadoop-hdfs-3_4_1-tests_jar-_-any-10189476290851067736/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-07T13:26:18,781 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1f411ba{HTTP/1.1, (http/1.1)}{localhost:37263} 2024-12-07T13:26:18,781 INFO [Time-limited test {}] server.Server(415): Started @196621ms 2024-12-07T13:26:18,782 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-07T13:26:19,546 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,42863,1733577894737/c7c455b68129%2C42863%2C1733577894737.1733577894972 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:26:19,546 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,43699,1733577893481/c7c455b68129%2C43699%2C1733577893481.meta.1733577894498.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-07T13:26:20,476 WARN [Thread-1645 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2112764b-01a2-c5c5-cacb-7511cece2d5d/cluster_13aa47e1-501f-0d38-6bb4-0939deb71df6/data/data1/current/BP-2093863607-172.17.0.3-1733577977871/current, will proceed with Du for space computation calculation, 2024-12-07T13:26:20,476 WARN [Thread-1646 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2112764b-01a2-c5c5-cacb-7511cece2d5d/cluster_13aa47e1-501f-0d38-6bb4-0939deb71df6/data/data2/current/BP-2093863607-172.17.0.3-1733577977871/current, will proceed with Du for space computation calculation, 2024-12-07T13:26:20,496 WARN [Thread-1609 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-07T13:26:20,498 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xc86d8a9702af30cc with lease ID 0x47263e584f55a274: Processing first storage report for DS-685b0eb4-ff6f-40a9-baf3-f4e669f03afa from datanode DatanodeRegistration(127.0.0.1:34847, datanodeUuid=bf4956d9-ddc8-4f86-a001-0f12b79b7c00, infoPort=39617, infoSecurePort=0, ipcPort=38365, storageInfo=lv=-57;cid=testClusterID;nsid=1758420790;c=1733577977871) 2024-12-07T13:26:20,498 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc86d8a9702af30cc with lease ID 0x47263e584f55a274: from storage DS-685b0eb4-ff6f-40a9-baf3-f4e669f03afa node DatanodeRegistration(127.0.0.1:34847, datanodeUuid=bf4956d9-ddc8-4f86-a001-0f12b79b7c00, infoPort=39617, infoSecurePort=0, ipcPort=38365, storageInfo=lv=-57;cid=testClusterID;nsid=1758420790;c=1733577977871), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-07T13:26:20,498 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xc86d8a9702af30cc with lease ID 0x47263e584f55a274: Processing first storage report for DS-c438f210-569e-4201-91d9-bad61aae04fa from datanode DatanodeRegistration(127.0.0.1:34847, datanodeUuid=bf4956d9-ddc8-4f86-a001-0f12b79b7c00, infoPort=39617, infoSecurePort=0, ipcPort=38365, storageInfo=lv=-57;cid=testClusterID;nsid=1758420790;c=1733577977871) 2024-12-07T13:26:20,498 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc86d8a9702af30cc with lease ID 0x47263e584f55a274: from storage DS-c438f210-569e-4201-91d9-bad61aae04fa node DatanodeRegistration(127.0.0.1:34847, datanodeUuid=bf4956d9-ddc8-4f86-a001-0f12b79b7c00, infoPort=39617, infoSecurePort=0, ipcPort=38365, storageInfo=lv=-57;cid=testClusterID;nsid=1758420790;c=1733577977871), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-07T13:26:20,547 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,42863,1733577894737/c7c455b68129%2C42863%2C1733577894737.1733577894972 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:26:20,547 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,43699,1733577893481/c7c455b68129%2C43699%2C1733577893481.meta.1733577894498.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:26:20,590 WARN [Thread-1656 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2112764b-01a2-c5c5-cacb-7511cece2d5d/cluster_13aa47e1-501f-0d38-6bb4-0939deb71df6/data/data3/current/BP-2093863607-172.17.0.3-1733577977871/current, will proceed with Du for space computation calculation, 2024-12-07T13:26:20,590 WARN [Thread-1657 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2112764b-01a2-c5c5-cacb-7511cece2d5d/cluster_13aa47e1-501f-0d38-6bb4-0939deb71df6/data/data4/current/BP-2093863607-172.17.0.3-1733577977871/current, will proceed with Du for space computation calculation, 2024-12-07T13:26:20,609 WARN [Thread-1632 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-07T13:26:20,611 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x5abd76b7b8af0227 with lease ID 0x47263e584f55a275: Processing first storage report for DS-4d4f8905-341c-409c-8365-ef85d408ad7b from datanode DatanodeRegistration(127.0.0.1:33687, datanodeUuid=6007c426-a7fc-4892-9352-97996599d0ac, infoPort=45911, infoSecurePort=0, ipcPort=33609, storageInfo=lv=-57;cid=testClusterID;nsid=1758420790;c=1733577977871) 2024-12-07T13:26:20,611 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x5abd76b7b8af0227 with lease ID 0x47263e584f55a275: from storage DS-4d4f8905-341c-409c-8365-ef85d408ad7b node DatanodeRegistration(127.0.0.1:33687, datanodeUuid=6007c426-a7fc-4892-9352-97996599d0ac, infoPort=45911, infoSecurePort=0, ipcPort=33609, storageInfo=lv=-57;cid=testClusterID;nsid=1758420790;c=1733577977871), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-07T13:26:20,611 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x5abd76b7b8af0227 with lease ID 0x47263e584f55a275: Processing first storage report for DS-df5abc31-e1e2-49c0-a2fb-a79d25eadd63 from datanode DatanodeRegistration(127.0.0.1:33687, datanodeUuid=6007c426-a7fc-4892-9352-97996599d0ac, infoPort=45911, infoSecurePort=0, ipcPort=33609, storageInfo=lv=-57;cid=testClusterID;nsid=1758420790;c=1733577977871) 2024-12-07T13:26:20,611 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x5abd76b7b8af0227 with lease ID 0x47263e584f55a275: from storage DS-df5abc31-e1e2-49c0-a2fb-a79d25eadd63 node DatanodeRegistration(127.0.0.1:33687, datanodeUuid=6007c426-a7fc-4892-9352-97996599d0ac, infoPort=45911, infoSecurePort=0, ipcPort=33609, storageInfo=lv=-57;cid=testClusterID;nsid=1758420790;c=1733577977871), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-07T13:26:20,613 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir 
to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2112764b-01a2-c5c5-cacb-7511cece2d5d 2024-12-07T13:26:20,615 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2112764b-01a2-c5c5-cacb-7511cece2d5d/cluster_13aa47e1-501f-0d38-6bb4-0939deb71df6/zookeeper_0, clientPort=60915, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2112764b-01a2-c5c5-cacb-7511cece2d5d/cluster_13aa47e1-501f-0d38-6bb4-0939deb71df6/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2112764b-01a2-c5c5-cacb-7511cece2d5d/cluster_13aa47e1-501f-0d38-6bb4-0939deb71df6/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-07T13:26:20,616 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=60915 2024-12-07T13:26:20,616 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T13:26:20,617 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T13:26:20,625 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34847 is added to blk_1073741825_1001 (size=7) 2024-12-07T13:26:20,626 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33687 is added to blk_1073741825_1001 (size=7) 2024-12-07T13:26:20,627 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:35389/user/jenkins/test-data/6ae24e44-b208-ee4e-212c-fa697bebf49a with version=8 2024-12-07T13:26:20,627 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:33723/user/jenkins/test-data/3eb2eb4e-7df3-dd45-b018-332cf885bdba/hbase-staging 2024-12-07T13:26:20,629 INFO [Time-limited test {}] client.ConnectionUtils(128): master/c7c455b68129:0 server-side Connection retries=45 2024-12-07T13:26:20,629 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-07T13:26:20,629 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-07T13:26:20,629 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-07T13:26:20,629 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-07T13:26:20,629 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class 
java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-07T13:26:20,629 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-12-07T13:26:20,629 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-07T13:26:20,630 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:33653 2024-12-07T13:26:20,631 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:33653 connecting to ZooKeeper ensemble=127.0.0.1:60915 2024-12-07T13:26:20,700 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:336530x0, quorum=127.0.0.1:60915, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-07T13:26:20,701 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:33653-0x1000074c18e0000 connected 2024-12-07T13:26:20,784 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T13:26:20,787 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T13:26:20,790 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:33653-0x1000074c18e0000, quorum=127.0.0.1:60915, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-07T13:26:20,790 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:35389/user/jenkins/test-data/6ae24e44-b208-ee4e-212c-fa697bebf49a, hbase.cluster.distributed=false 2024-12-07T13:26:20,792 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:33653-0x1000074c18e0000, quorum=127.0.0.1:60915, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-07T13:26:20,793 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=33653 2024-12-07T13:26:20,793 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=33653 2024-12-07T13:26:20,793 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=33653 2024-12-07T13:26:20,794 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=33653 2024-12-07T13:26:20,794 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=33653 2024-12-07T13:26:20,811 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/c7c455b68129:0 server-side Connection retries=45 2024-12-07T13:26:20,812 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-07T13:26:20,812 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with 
queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-07T13:26:20,812 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-07T13:26:20,812 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-07T13:26:20,812 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-07T13:26:20,812 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-07T13:26:20,812 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-07T13:26:20,813 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:34499 2024-12-07T13:26:20,814 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:34499 connecting to ZooKeeper ensemble=127.0.0.1:60915 2024-12-07T13:26:20,814 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T13:26:20,816 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T13:26:20,826 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:344990x0, quorum=127.0.0.1:60915, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-07T13:26:20,827 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:344990x0, quorum=127.0.0.1:60915, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-07T13:26:20,827 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:34499-0x1000074c18e0001 connected 2024-12-07T13:26:20,827 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-07T13:26:20,827 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-07T13:26:20,828 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:34499-0x1000074c18e0001, quorum=127.0.0.1:60915, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-07T13:26:20,829 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:34499-0x1000074c18e0001, quorum=127.0.0.1:60915, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-07T13:26:20,829 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=34499 2024-12-07T13:26:20,829 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=34499 2024-12-07T13:26:20,829 DEBUG [Time-limited test {}] 
ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=34499 2024-12-07T13:26:20,830 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=34499 2024-12-07T13:26:20,830 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=34499 2024-12-07T13:26:20,842 DEBUG [M:0;c7c455b68129:33653 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;c7c455b68129:33653 2024-12-07T13:26:20,843 INFO [master/c7c455b68129:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/c7c455b68129,33653,1733577980629 2024-12-07T13:26:20,854 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33653-0x1000074c18e0000, quorum=127.0.0.1:60915, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-07T13:26:20,854 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34499-0x1000074c18e0001, quorum=127.0.0.1:60915, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-07T13:26:20,855 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:33653-0x1000074c18e0000, quorum=127.0.0.1:60915, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/c7c455b68129,33653,1733577980629 2024-12-07T13:26:20,865 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34499-0x1000074c18e0001, quorum=127.0.0.1:60915, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-07T13:26:20,865 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33653-0x1000074c18e0000, quorum=127.0.0.1:60915, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T13:26:20,865 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34499-0x1000074c18e0001, quorum=127.0.0.1:60915, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T13:26:20,866 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:33653-0x1000074c18e0000, quorum=127.0.0.1:60915, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-07T13:26:20,867 INFO [master/c7c455b68129:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/c7c455b68129,33653,1733577980629 from backup master directory 2024-12-07T13:26:20,879 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33653-0x1000074c18e0000, quorum=127.0.0.1:60915, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/c7c455b68129,33653,1733577980629 2024-12-07T13:26:20,879 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34499-0x1000074c18e0001, quorum=127.0.0.1:60915, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-07T13:26:20,879 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33653-0x1000074c18e0000, quorum=127.0.0.1:60915, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, 
state=SyncConnected, path=/hbase/backup-masters 2024-12-07T13:26:20,879 WARN [master/c7c455b68129:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-07T13:26:20,879 INFO [master/c7c455b68129:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=c7c455b68129,33653,1733577980629 2024-12-07T13:26:20,887 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:35389/user/jenkins/test-data/6ae24e44-b208-ee4e-212c-fa697bebf49a/hbase.id] with ID: 246f049c-9c8e-4144-bfc2-441a448be4f3 2024-12-07T13:26:20,887 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:35389/user/jenkins/test-data/6ae24e44-b208-ee4e-212c-fa697bebf49a/.tmp/hbase.id 2024-12-07T13:26:20,894 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33687 is added to blk_1073741826_1002 (size=42) 2024-12-07T13:26:20,895 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34847 is added to blk_1073741826_1002 (size=42) 2024-12-07T13:26:20,895 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:35389/user/jenkins/test-data/6ae24e44-b208-ee4e-212c-fa697bebf49a/.tmp/hbase.id]:[hdfs://localhost:35389/user/jenkins/test-data/6ae24e44-b208-ee4e-212c-fa697bebf49a/hbase.id] 2024-12-07T13:26:20,906 INFO [master/c7c455b68129:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T13:26:20,906 INFO [master/c7c455b68129:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-12-07T13:26:20,907 INFO [master/c7c455b68129:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
2024-12-07T13:26:20,917 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34499-0x1000074c18e0001, quorum=127.0.0.1:60915, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T13:26:20,917 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33653-0x1000074c18e0000, quorum=127.0.0.1:60915, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T13:26:20,924 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34847 is added to blk_1073741827_1003 (size=196) 2024-12-07T13:26:20,924 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33687 is added to blk_1073741827_1003 (size=196) 2024-12-07T13:26:20,925 INFO [master/c7c455b68129:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-07T13:26:20,926 INFO [master/c7c455b68129:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-07T13:26:20,926 INFO [master/c7c455b68129:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-07T13:26:20,933 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33687 is added to blk_1073741828_1004 (size=1189) 2024-12-07T13:26:20,933 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34847 is added to blk_1073741828_1004 (size=1189) 2024-12-07T13:26:20,934 INFO [master/c7c455b68129:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:35389/user/jenkins/test-data/6ae24e44-b208-ee4e-212c-fa697bebf49a/MasterData/data/master/store 2024-12-07T13:26:20,939 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34847 is added to blk_1073741829_1005 (size=34) 2024-12-07T13:26:20,939 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33687 is added to blk_1073741829_1005 (size=34) 2024-12-07T13:26:20,940 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T13:26:20,940 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-07T13:26:20,940 INFO [master/c7c455b68129:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-07T13:26:20,940 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-07T13:26:20,940 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-07T13:26:20,940 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-07T13:26:20,940 INFO [master/c7c455b68129:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
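Editor's note: the descriptor dumped above (families info, proc, rs and state with their VERSIONS, BLOOMFILTER, IN_MEMORY, BLOCKSIZE and DATA_BLOCK_ENCODING settings) is assembled internally by the master for its local 'master:store' region. For reference, an equivalent descriptor can be expressed with the public HBase client builders roughly as below; this is a sketch that only reproduces the attributes visible in the log, not the code the master actually runs.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public final class MasterStoreDescriptorSketch {
      static TableDescriptor build() {
        // 'info' mirrors the logged settings: 3 versions, ROWCOL bloom, in-memory,
        // 8 KB blocks, ROW_INDEX_V1 block encoding.
        return TableDescriptorBuilder.newBuilder(TableName.valueOf("master", "store"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
                .setMaxVersions(3)
                .setBloomFilterType(BloomType.ROWCOL)
                .setInMemory(true)
                .setBlocksize(8 * 1024)
                .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
                .build())
            // proc, rs and state keep 1 version, ROW bloom, 64 KB blocks, no encoding.
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("proc"))
                .setMaxVersions(1).setBloomFilterType(BloomType.ROW).setBlocksize(64 * 1024).build())
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("rs"))
                .setMaxVersions(1).setBloomFilterType(BloomType.ROW).setBlocksize(64 * 1024).build())
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("state"))
                .setMaxVersions(1).setBloomFilterType(BloomType.ROW).setBlocksize(64 * 1024).build())
            .build();
      }
    }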
2024-12-07T13:26:20,940 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733577980940Disabling compacts and flushes for region at 1733577980940Disabling writes for close at 1733577980940Writing region close event to WAL at 1733577980940Closed at 1733577980940 2024-12-07T13:26:20,941 WARN [master/c7c455b68129:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:35389/user/jenkins/test-data/6ae24e44-b208-ee4e-212c-fa697bebf49a/MasterData/data/master/store/.initializing 2024-12-07T13:26:20,941 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:35389/user/jenkins/test-data/6ae24e44-b208-ee4e-212c-fa697bebf49a/MasterData/WALs/c7c455b68129,33653,1733577980629 2024-12-07T13:26:20,944 INFO [master/c7c455b68129:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=c7c455b68129%2C33653%2C1733577980629, suffix=, logDir=hdfs://localhost:35389/user/jenkins/test-data/6ae24e44-b208-ee4e-212c-fa697bebf49a/MasterData/WALs/c7c455b68129,33653,1733577980629, archiveDir=hdfs://localhost:35389/user/jenkins/test-data/6ae24e44-b208-ee4e-212c-fa697bebf49a/MasterData/oldWALs, maxLogs=10 2024-12-07T13:26:20,944 INFO [master/c7c455b68129:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor c7c455b68129%2C33653%2C1733577980629.1733577980944 2024-12-07T13:26:20,948 INFO [master/c7c455b68129:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/6ae24e44-b208-ee4e-212c-fa697bebf49a/MasterData/WALs/c7c455b68129,33653,1733577980629/c7c455b68129%2C33653%2C1733577980629.1733577980944 2024-12-07T13:26:20,951 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45911:45911),(127.0.0.1/127.0.0.1:39617:39617)] 2024-12-07T13:26:20,951 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-07T13:26:20,952 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T13:26:20,952 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-07T13:26:20,952 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-07T13:26:20,953 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-07T13:26:20,954 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-07T13:26:20,954 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T13:26:20,954 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T13:26:20,954 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-07T13:26:20,955 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-07T13:26:20,955 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T13:26:20,956 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T13:26:20,956 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-07T13:26:20,956 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-07T13:26:20,956 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T13:26:20,957 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T13:26:20,957 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-07T13:26:20,958 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-07T13:26:20,958 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T13:26:20,958 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T13:26:20,958 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-07T13:26:20,959 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35389/user/jenkins/test-data/6ae24e44-b208-ee4e-212c-fa697bebf49a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-07T13:26:20,959 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35389/user/jenkins/test-data/6ae24e44-b208-ee4e-212c-fa697bebf49a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-07T13:26:20,960 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-07T13:26:20,960 DEBUG [master/c7c455b68129:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-07T13:26:20,961 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-07T13:26:20,962 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-07T13:26:20,964 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35389/user/jenkins/test-data/6ae24e44-b208-ee4e-212c-fa697bebf49a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-07T13:26:20,964 INFO [master/c7c455b68129:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=815389, jitterRate=0.03682146966457367}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-07T13:26:20,965 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733577980952Initializing all the Stores at 1733577980952Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733577980952Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733577980953 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733577980953Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733577980953Cleaning up temporary data from old regions at 1733577980960 (+7 ms)Region opened successfully at 1733577980965 (+5 ms) 2024-12-07T13:26:20,965 INFO [master/c7c455b68129:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-07T13:26:20,968 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1cac316a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=c7c455b68129/172.17.0.3:0 2024-12-07T13:26:20,969 INFO [master/c7c455b68129:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-12-07T13:26:20,970 INFO [master/c7c455b68129:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-07T13:26:20,970 INFO [master/c7c455b68129:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-07T13:26:20,970 INFO [master/c7c455b68129:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-07T13:26:20,970 INFO [master/c7c455b68129:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-12-07T13:26:20,971 INFO [master/c7c455b68129:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-12-07T13:26:20,971 INFO [master/c7c455b68129:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-07T13:26:20,973 INFO [master/c7c455b68129:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-07T13:26:20,974 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33653-0x1000074c18e0000, quorum=127.0.0.1:60915, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-07T13:26:20,984 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-07T13:26:20,985 INFO [master/c7c455b68129:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-07T13:26:20,986 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33653-0x1000074c18e0000, quorum=127.0.0.1:60915, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-07T13:26:20,995 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-07T13:26:20,995 INFO [master/c7c455b68129:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-07T13:26:20,997 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33653-0x1000074c18e0000, quorum=127.0.0.1:60915, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-07T13:26:21,005 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-07T13:26:21,007 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33653-0x1000074c18e0000, quorum=127.0.0.1:60915, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-07T13:26:21,016 DEBUG 
[master/c7c455b68129:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-07T13:26:21,021 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33653-0x1000074c18e0000, quorum=127.0.0.1:60915, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-07T13:26:21,033 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-07T13:26:21,048 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34499-0x1000074c18e0001, quorum=127.0.0.1:60915, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-07T13:26:21,048 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33653-0x1000074c18e0000, quorum=127.0.0.1:60915, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-07T13:26:21,048 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34499-0x1000074c18e0001, quorum=127.0.0.1:60915, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T13:26:21,048 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33653-0x1000074c18e0000, quorum=127.0.0.1:60915, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T13:26:21,049 INFO [master/c7c455b68129:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=c7c455b68129,33653,1733577980629, sessionid=0x1000074c18e0000, setting cluster-up flag (Was=false) 2024-12-07T13:26:21,068 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33653-0x1000074c18e0000, quorum=127.0.0.1:60915, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T13:26:21,068 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34499-0x1000074c18e0001, quorum=127.0.0.1:60915, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T13:26:21,100 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-07T13:26:21,102 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=c7c455b68129,33653,1733577980629 2024-12-07T13:26:21,121 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34499-0x1000074c18e0001, quorum=127.0.0.1:60915, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T13:26:21,121 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33653-0x1000074c18e0000, quorum=127.0.0.1:60915, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T13:26:21,153 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-07T13:26:21,155 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=c7c455b68129,33653,1733577980629 2024-12-07T13:26:21,158 INFO [master/c7c455b68129:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:35389/user/jenkins/test-data/6ae24e44-b208-ee4e-212c-fa697bebf49a/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-07T13:26:21,160 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-07T13:26:21,161 INFO [master/c7c455b68129:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-07T13:26:21,161 INFO [master/c7c455b68129:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-12-07T13:26:21,161 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: c7c455b68129,33653,1733577980629 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-07T13:26:21,162 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/c7c455b68129:0, corePoolSize=5, maxPoolSize=5 2024-12-07T13:26:21,162 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/c7c455b68129:0, corePoolSize=5, maxPoolSize=5 2024-12-07T13:26:21,162 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/c7c455b68129:0, corePoolSize=5, maxPoolSize=5 2024-12-07T13:26:21,162 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/c7c455b68129:0, corePoolSize=5, maxPoolSize=5 2024-12-07T13:26:21,162 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/c7c455b68129:0, corePoolSize=10, maxPoolSize=10 2024-12-07T13:26:21,162 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/c7c455b68129:0, corePoolSize=1, maxPoolSize=1 2024-12-07T13:26:21,162 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/c7c455b68129:0, corePoolSize=2, maxPoolSize=2 2024-12-07T13:26:21,162 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/c7c455b68129:0, corePoolSize=1, 
maxPoolSize=1 2024-12-07T13:26:21,165 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-07T13:26:21,166 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-07T13:26:21,166 INFO [master/c7c455b68129:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733578011166 2024-12-07T13:26:21,166 INFO [master/c7c455b68129:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-07T13:26:21,166 INFO [master/c7c455b68129:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-07T13:26:21,166 INFO [master/c7c455b68129:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-07T13:26:21,166 INFO [master/c7c455b68129:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-07T13:26:21,166 INFO [master/c7c455b68129:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-07T13:26:21,166 INFO [master/c7c455b68129:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-07T13:26:21,167 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T13:26:21,167 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-07T13:26:21,169 INFO [master/c7c455b68129:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore 
ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-07T13:26:21,169 INFO [master/c7c455b68129:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-07T13:26:21,169 INFO [master/c7c455b68129:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-07T13:26:21,169 INFO [master/c7c455b68129:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-07T13:26:21,170 INFO [master/c7c455b68129:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-07T13:26:21,170 INFO [master/c7c455b68129:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-07T13:26:21,171 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/c7c455b68129:0:becomeActiveMaster-HFileCleaner.large.0-1733577981170,5,FailOnTimeoutGroup] 2024-12-07T13:26:21,172 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/c7c455b68129:0:becomeActiveMaster-HFileCleaner.small.0-1733577981171,5,FailOnTimeoutGroup] 2024-12-07T13:26:21,173 INFO [master/c7c455b68129:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-07T13:26:21,173 INFO [master/c7c455b68129:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-07T13:26:21,173 INFO [master/c7c455b68129:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-07T13:26:21,173 INFO [master/c7c455b68129:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
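Editor's note: the ChoreService lines above each register a periodic background task (LogsCleaner every 600000 ms, HFileCleaner every 600000 ms, ReplicationBarrierCleaner every 43200000 ms, and so on). A minimal sketch of scheduling one such task, assuming HBase's public ScheduledChore/ChoreService classes with the (name, stopper, period-in-ms) constructor; the chore body and names here are placeholders, not any of the cleaners in the log.

    import org.apache.hadoop.hbase.ChoreService;
    import org.apache.hadoop.hbase.ScheduledChore;
    import org.apache.hadoop.hbase.Stoppable;

    public final class ChoreSketch {
      public static void main(String[] args) throws Exception {
        Stoppable stopper = new Stoppable() {          // trivial stopper; chores stop when it does
          private volatile boolean stopped;
          @Override public void stop(String why) { stopped = true; }
          @Override public boolean isStopped() { return stopped; }
        };
        ChoreService service = new ChoreService("demo");
        // Runs every 600000 ms, the same period the LogsCleaner chore reports above.
        service.scheduleChore(new ScheduledChore("DemoCleaner", stopper, 600_000) {
          @Override protected void chore() {
            System.out.println("periodic cleanup work goes here");
          }
        });
        Thread.sleep(5_000);
        service.shutdown();
      }
    }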
2024-12-07T13:26:21,178 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33687 is added to blk_1073741831_1007 (size=1321) 2024-12-07T13:26:21,178 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34847 is added to blk_1073741831_1007 (size=1321) 2024-12-07T13:26:21,179 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:35389/user/jenkins/test-data/6ae24e44-b208-ee4e-212c-fa697bebf49a/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-07T13:26:21,179 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:35389/user/jenkins/test-data/6ae24e44-b208-ee4e-212c-fa697bebf49a 2024-12-07T13:26:21,187 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34847 is added to blk_1073741832_1008 (size=32) 2024-12-07T13:26:21,188 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33687 is added to blk_1073741832_1008 (size=32) 2024-12-07T13:26:21,188 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T13:26:21,189 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-07T13:26:21,190 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; 
off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-07T13:26:21,191 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T13:26:21,191 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T13:26:21,191 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-07T13:26:21,192 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-07T13:26:21,192 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T13:26:21,192 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T13:26:21,193 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-07T13:26:21,194 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-07T13:26:21,194 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T13:26:21,194 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T13:26:21,194 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-07T13:26:21,196 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-07T13:26:21,196 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T13:26:21,196 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T13:26:21,196 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-07T13:26:21,197 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35389/user/jenkins/test-data/6ae24e44-b208-ee4e-212c-fa697bebf49a/data/hbase/meta/1588230740 2024-12-07T13:26:21,197 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35389/user/jenkins/test-data/6ae24e44-b208-ee4e-212c-fa697bebf49a/data/hbase/meta/1588230740 2024-12-07T13:26:21,198 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-07T13:26:21,198 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-07T13:26:21,199 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
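Editor's note: the FlushLargeStoresPolicy line just below repeats the fallback already seen for master:store: when hbase.hregion.percolumnfamilyflush.size.lower.bound is absent from the table descriptor, the per-family flush lower bound is simply the region's memstore flush size divided by the number of families. A quick check against the numbers in this log; the method below is only a stand-in for that division, and the 64 MB meta flush size is inferred from the logged 16 MB lower bound rather than stated anywhere in the log.

    public final class FlushLowerBoundCheck {
      static long perFamilyLowerBound(long memStoreFlushSize, int numFamilies) {
        return memStoreFlushSize / numFamilies;
      }

      public static void main(String[] args) {
        // master:store: flushSize=134217728 (128 MB) and 4 families (info, proc, rs, state)
        System.out.println(perFamilyLowerBound(134_217_728L, 4)); // 33554432 -> the logged "32.0 M"
        // hbase:meta: flushSizeLowerBound=16777216 (16 MB) for its 4 families
        // implies a region flush size of 4 * 16777216 = 67108864 (64 MB) here.
        System.out.println(4L * 16_777_216L);                     // 67108864
      }
    }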
2024-12-07T13:26:21,200 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-07T13:26:21,202 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35389/user/jenkins/test-data/6ae24e44-b208-ee4e-212c-fa697bebf49a/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-07T13:26:21,202 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=865337, jitterRate=0.10033315420150757}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-07T13:26:21,202 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733577981188Initializing all the Stores at 1733577981189 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733577981189Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733577981189Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733577981189Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733577981189Cleaning up temporary data from old regions at 1733577981198 (+9 ms)Region opened successfully at 1733577981202 (+4 ms) 2024-12-07T13:26:21,203 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-07T13:26:21,203 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-07T13:26:21,203 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-07T13:26:21,203 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-07T13:26:21,203 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-07T13:26:21,203 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-07T13:26:21,203 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733577981202Disabling compacts and flushes for region at 1733577981202Disabling writes for close at 1733577981203 (+1 ms)Writing 
region close event to WAL at 1733577981203Closed at 1733577981203 2024-12-07T13:26:21,204 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-07T13:26:21,204 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-07T13:26:21,204 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-07T13:26:21,206 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-07T13:26:21,207 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-07T13:26:21,232 INFO [RS:0;c7c455b68129:34499 {}] regionserver.HRegionServer(746): ClusterId : 246f049c-9c8e-4144-bfc2-441a448be4f3 2024-12-07T13:26:21,232 DEBUG [RS:0;c7c455b68129:34499 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-07T13:26:21,245 DEBUG [RS:0;c7c455b68129:34499 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-07T13:26:21,245 DEBUG [RS:0;c7c455b68129:34499 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-07T13:26:21,255 DEBUG [RS:0;c7c455b68129:34499 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-07T13:26:21,255 DEBUG [RS:0;c7c455b68129:34499 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4d586b79, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=c7c455b68129/172.17.0.3:0 2024-12-07T13:26:21,265 DEBUG [RS:0;c7c455b68129:34499 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;c7c455b68129:34499 2024-12-07T13:26:21,265 INFO [RS:0;c7c455b68129:34499 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-07T13:26:21,265 INFO [RS:0;c7c455b68129:34499 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-07T13:26:21,265 DEBUG [RS:0;c7c455b68129:34499 {}] regionserver.HRegionServer(832): About to register with Master. 
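Editor's note: everything from the master election above through the regionserver startup below is the output of a single-node minicluster test: one active master (port 33653) and one regionserver (RS:0, port 34499) on embedded HDFS and ZooKeeper. For orientation only, a minimal sketch of starting and stopping such a minicluster, assuming the HBase 2.x test helper HBaseTestingUtility and its startMiniCluster/createTable/shutdownMiniCluster methods; the 3.0.0-beta-2-SNAPSHOT build this log comes from may ship a renamed helper, so treat the class name as an assumption.

    import org.apache.hadoop.hbase.HBaseTestingUtility;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Get;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public final class MiniClusterSketch {
      public static void main(String[] args) throws Exception {
        HBaseTestingUtility util = new HBaseTestingUtility();
        util.startMiniCluster(1);                                  // 1 master + 1 regionserver
        try {
          Table table = util.createTable(TableName.valueOf("demo"), Bytes.toBytes("info"));
          table.put(new Put(Bytes.toBytes("r1"))
              .addColumn(Bytes.toBytes("info"), Bytes.toBytes("q"), Bytes.toBytes("v")));
          byte[] value = table.get(new Get(Bytes.toBytes("r1")))
              .getValue(Bytes.toBytes("info"), Bytes.toBytes("q"));
          System.out.println(Bytes.toString(value));               // prints "v"
        } finally {
          util.shutdownMiniCluster();                              // stops RS, master, ZK and HDFS
        }
      }
    }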
2024-12-07T13:26:21,266 INFO [RS:0;c7c455b68129:34499 {}] regionserver.HRegionServer(2659): reportForDuty to master=c7c455b68129,33653,1733577980629 with port=34499, startcode=1733577980811 2024-12-07T13:26:21,266 DEBUG [RS:0;c7c455b68129:34499 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-07T13:26:21,268 INFO [HMaster-EventLoopGroup-12-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:49049, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.5 (auth:SIMPLE), service=RegionServerStatusService 2024-12-07T13:26:21,268 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33653 {}] master.ServerManager(363): Checking decommissioned status of RegionServer c7c455b68129,34499,1733577980811 2024-12-07T13:26:21,268 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33653 {}] master.ServerManager(517): Registering regionserver=c7c455b68129,34499,1733577980811 2024-12-07T13:26:21,270 DEBUG [RS:0;c7c455b68129:34499 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:35389/user/jenkins/test-data/6ae24e44-b208-ee4e-212c-fa697bebf49a 2024-12-07T13:26:21,270 DEBUG [RS:0;c7c455b68129:34499 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:35389 2024-12-07T13:26:21,270 DEBUG [RS:0;c7c455b68129:34499 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-07T13:26:21,279 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33653-0x1000074c18e0000, quorum=127.0.0.1:60915, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-07T13:26:21,280 DEBUG [RS:0;c7c455b68129:34499 {}] zookeeper.ZKUtil(111): regionserver:34499-0x1000074c18e0001, quorum=127.0.0.1:60915, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/c7c455b68129,34499,1733577980811 2024-12-07T13:26:21,280 WARN [RS:0;c7c455b68129:34499 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-07T13:26:21,280 INFO [RS:0;c7c455b68129:34499 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-07T13:26:21,280 DEBUG [RS:0;c7c455b68129:34499 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:35389/user/jenkins/test-data/6ae24e44-b208-ee4e-212c-fa697bebf49a/WALs/c7c455b68129,34499,1733577980811 2024-12-07T13:26:21,280 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [c7c455b68129,34499,1733577980811] 2024-12-07T13:26:21,283 INFO [RS:0;c7c455b68129:34499 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-07T13:26:21,284 INFO [RS:0;c7c455b68129:34499 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-07T13:26:21,284 INFO [RS:0;c7c455b68129:34499 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-07T13:26:21,285 INFO [RS:0;c7c455b68129:34499 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
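Editor's note: the registration above ends with the regionserver creating an ephemeral znode under /hbase/rs, which the master's RegionServerTracker picks up through a ZooKeeper child watch. A standalone observer can see the same thing with the plain ZooKeeper client; this is an outside-observer sketch, not HBase's ZKWatcher, and the quorum address is simply the one printed in the log.

    import java.util.List;
    import java.util.concurrent.CountDownLatch;
    import org.apache.zookeeper.WatchedEvent;
    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooKeeper;

    public final class RsZNodeWatcherSketch {
      public static void main(String[] args) throws Exception {
        CountDownLatch connected = new CountDownLatch(1);
        ZooKeeper zk = new ZooKeeper("127.0.0.1:60915", 30_000, event -> {
          if (event.getState() == Watcher.Event.KeeperState.SyncConnected) {
            connected.countDown();
          }
        });
        connected.await();
        // Same idea as RegionServerTracker: list /hbase/rs and arm a watch for changes.
        Watcher childWatcher = new Watcher() {
          @Override public void process(WatchedEvent event) {
            System.out.println("event " + event.getType() + " on " + event.getPath());
          }
        };
        List<String> servers = zk.getChildren("/hbase/rs", childWatcher);
        // e.g. [c7c455b68129,34499,1733577980811]
        System.out.println("live regionserver znodes: " + servers);
        zk.close();
      }
    }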
2024-12-07T13:26:21,285 INFO [RS:0;c7c455b68129:34499 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-07T13:26:21,285 INFO [RS:0;c7c455b68129:34499 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-07T13:26:21,286 INFO [RS:0;c7c455b68129:34499 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-07T13:26:21,286 DEBUG [RS:0;c7c455b68129:34499 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/c7c455b68129:0, corePoolSize=1, maxPoolSize=1 2024-12-07T13:26:21,286 DEBUG [RS:0;c7c455b68129:34499 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/c7c455b68129:0, corePoolSize=1, maxPoolSize=1 2024-12-07T13:26:21,286 DEBUG [RS:0;c7c455b68129:34499 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/c7c455b68129:0, corePoolSize=1, maxPoolSize=1 2024-12-07T13:26:21,286 DEBUG [RS:0;c7c455b68129:34499 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/c7c455b68129:0, corePoolSize=1, maxPoolSize=1 2024-12-07T13:26:21,286 DEBUG [RS:0;c7c455b68129:34499 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/c7c455b68129:0, corePoolSize=1, maxPoolSize=1 2024-12-07T13:26:21,286 DEBUG [RS:0;c7c455b68129:34499 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/c7c455b68129:0, corePoolSize=2, maxPoolSize=2 2024-12-07T13:26:21,286 DEBUG [RS:0;c7c455b68129:34499 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/c7c455b68129:0, corePoolSize=1, maxPoolSize=1 2024-12-07T13:26:21,286 DEBUG [RS:0;c7c455b68129:34499 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/c7c455b68129:0, corePoolSize=1, maxPoolSize=1 2024-12-07T13:26:21,286 DEBUG [RS:0;c7c455b68129:34499 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/c7c455b68129:0, corePoolSize=1, maxPoolSize=1 2024-12-07T13:26:21,286 DEBUG [RS:0;c7c455b68129:34499 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/c7c455b68129:0, corePoolSize=1, maxPoolSize=1 2024-12-07T13:26:21,286 DEBUG [RS:0;c7c455b68129:34499 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/c7c455b68129:0, corePoolSize=1, maxPoolSize=1 2024-12-07T13:26:21,286 DEBUG [RS:0;c7c455b68129:34499 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/c7c455b68129:0, corePoolSize=1, maxPoolSize=1 2024-12-07T13:26:21,286 DEBUG [RS:0;c7c455b68129:34499 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/c7c455b68129:0, corePoolSize=3, maxPoolSize=3 2024-12-07T13:26:21,286 DEBUG [RS:0;c7c455b68129:34499 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/c7c455b68129:0, corePoolSize=3, maxPoolSize=3 2024-12-07T13:26:21,289 INFO [RS:0;c7c455b68129:34499 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-12-07T13:26:21,289 INFO [RS:0;c7c455b68129:34499 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-07T13:26:21,289 INFO [RS:0;c7c455b68129:34499 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-07T13:26:21,289 INFO [RS:0;c7c455b68129:34499 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-07T13:26:21,289 INFO [RS:0;c7c455b68129:34499 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-07T13:26:21,289 INFO [RS:0;c7c455b68129:34499 {}] hbase.ChoreService(168): Chore ScheduledChore name=c7c455b68129,34499,1733577980811-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-07T13:26:21,303 INFO [RS:0;c7c455b68129:34499 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-07T13:26:21,303 INFO [RS:0;c7c455b68129:34499 {}] hbase.ChoreService(168): Chore ScheduledChore name=c7c455b68129,34499,1733577980811-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-07T13:26:21,303 INFO [RS:0;c7c455b68129:34499 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-07T13:26:21,303 INFO [RS:0;c7c455b68129:34499 {}] regionserver.Replication(171): c7c455b68129,34499,1733577980811 started 2024-12-07T13:26:21,316 INFO [RS:0;c7c455b68129:34499 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-07T13:26:21,316 INFO [RS:0;c7c455b68129:34499 {}] regionserver.HRegionServer(1482): Serving as c7c455b68129,34499,1733577980811, RpcServer on c7c455b68129/172.17.0.3:34499, sessionid=0x1000074c18e0001 2024-12-07T13:26:21,316 DEBUG [RS:0;c7c455b68129:34499 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-07T13:26:21,316 DEBUG [RS:0;c7c455b68129:34499 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager c7c455b68129,34499,1733577980811 2024-12-07T13:26:21,316 DEBUG [RS:0;c7c455b68129:34499 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'c7c455b68129,34499,1733577980811' 2024-12-07T13:26:21,316 DEBUG [RS:0;c7c455b68129:34499 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-07T13:26:21,317 DEBUG [RS:0;c7c455b68129:34499 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-07T13:26:21,318 DEBUG [RS:0;c7c455b68129:34499 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-07T13:26:21,318 DEBUG [RS:0;c7c455b68129:34499 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-07T13:26:21,318 DEBUG [RS:0;c7c455b68129:34499 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager c7c455b68129,34499,1733577980811 2024-12-07T13:26:21,318 DEBUG [RS:0;c7c455b68129:34499 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'c7c455b68129,34499,1733577980811' 2024-12-07T13:26:21,318 DEBUG [RS:0;c7c455b68129:34499 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-07T13:26:21,318 DEBUG 
[RS:0;c7c455b68129:34499 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-07T13:26:21,319 DEBUG [RS:0;c7c455b68129:34499 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-07T13:26:21,319 INFO [RS:0;c7c455b68129:34499 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-07T13:26:21,319 INFO [RS:0;c7c455b68129:34499 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-07T13:26:21,357 WARN [c7c455b68129:33653 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-12-07T13:26:21,421 INFO [RS:0;c7c455b68129:34499 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=c7c455b68129%2C34499%2C1733577980811, suffix=, logDir=hdfs://localhost:35389/user/jenkins/test-data/6ae24e44-b208-ee4e-212c-fa697bebf49a/WALs/c7c455b68129,34499,1733577980811, archiveDir=hdfs://localhost:35389/user/jenkins/test-data/6ae24e44-b208-ee4e-212c-fa697bebf49a/oldWALs, maxLogs=32 2024-12-07T13:26:21,421 INFO [RS:0;c7c455b68129:34499 {}] monitor.StreamSlowMonitor(122): New stream slow monitor c7c455b68129%2C34499%2C1733577980811.1733577981421 2024-12-07T13:26:21,428 INFO [RS:0;c7c455b68129:34499 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/6ae24e44-b208-ee4e-212c-fa697bebf49a/WALs/c7c455b68129,34499,1733577980811/c7c455b68129%2C34499%2C1733577980811.1733577981421 2024-12-07T13:26:21,429 DEBUG [RS:0;c7c455b68129:34499 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45911:45911),(127.0.0.1/127.0.0.1:39617:39617)] 2024-12-07T13:26:21,548 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,42863,1733577894737/c7c455b68129%2C42863%2C1733577894737.1733577894972 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:26:21,548 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,43699,1733577893481/c7c455b68129%2C43699%2C1733577893481.meta.1733577894498.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-07T13:26:21,607 DEBUG [c7c455b68129:33653 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-12-07T13:26:21,608 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=c7c455b68129,34499,1733577980811 2024-12-07T13:26:21,609 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as c7c455b68129,34499,1733577980811, state=OPENING 2024-12-07T13:26:21,623 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-07T13:26:21,633 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33653-0x1000074c18e0000, quorum=127.0.0.1:60915, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T13:26:21,633 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34499-0x1000074c18e0001, quorum=127.0.0.1:60915, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T13:26:21,634 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-07T13:26:21,634 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-07T13:26:21,634 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-07T13:26:21,634 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=c7c455b68129,34499,1733577980811}] 2024-12-07T13:26:21,787 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-07T13:26:21,790 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:51745, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-07T13:26:21,796 INFO [RS_OPEN_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-07T13:26:21,796 INFO [RS_OPEN_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-07T13:26:21,801 INFO [RS_OPEN_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=c7c455b68129%2C34499%2C1733577980811.meta, suffix=.meta, logDir=hdfs://localhost:35389/user/jenkins/test-data/6ae24e44-b208-ee4e-212c-fa697bebf49a/WALs/c7c455b68129,34499,1733577980811, archiveDir=hdfs://localhost:35389/user/jenkins/test-data/6ae24e44-b208-ee4e-212c-fa697bebf49a/oldWALs, maxLogs=32 2024-12-07T13:26:21,801 INFO [RS_OPEN_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor c7c455b68129%2C34499%2C1733577980811.meta.1733577981801.meta 2024-12-07T13:26:21,806 INFO 
[RS_OPEN_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/6ae24e44-b208-ee4e-212c-fa697bebf49a/WALs/c7c455b68129,34499,1733577980811/c7c455b68129%2C34499%2C1733577980811.meta.1733577981801.meta 2024-12-07T13:26:21,807 DEBUG [RS_OPEN_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45911:45911),(127.0.0.1/127.0.0.1:39617:39617)] 2024-12-07T13:26:21,808 DEBUG [RS_OPEN_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-07T13:26:21,808 DEBUG [RS_OPEN_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-07T13:26:21,809 DEBUG [RS_OPEN_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-07T13:26:21,809 INFO [RS_OPEN_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-12-07T13:26:21,809 DEBUG [RS_OPEN_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-07T13:26:21,809 DEBUG [RS_OPEN_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T13:26:21,809 DEBUG [RS_OPEN_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-07T13:26:21,809 DEBUG [RS_OPEN_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-07T13:26:21,811 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-07T13:26:21,812 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-07T13:26:21,812 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T13:26:21,813 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T13:26:21,813 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-07T13:26:21,814 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-07T13:26:21,814 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T13:26:21,815 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T13:26:21,815 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-07T13:26:21,816 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-07T13:26:21,816 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T13:26:21,817 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T13:26:21,817 INFO [StoreOpener-1588230740-1 {}] 
regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-07T13:26:21,818 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-07T13:26:21,818 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T13:26:21,818 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T13:26:21,818 DEBUG [RS_OPEN_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-07T13:26:21,819 DEBUG [RS_OPEN_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35389/user/jenkins/test-data/6ae24e44-b208-ee4e-212c-fa697bebf49a/data/hbase/meta/1588230740 2024-12-07T13:26:21,820 DEBUG [RS_OPEN_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35389/user/jenkins/test-data/6ae24e44-b208-ee4e-212c-fa697bebf49a/data/hbase/meta/1588230740 2024-12-07T13:26:21,821 DEBUG [RS_OPEN_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-07T13:26:21,821 DEBUG [RS_OPEN_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-07T13:26:21,821 DEBUG [RS_OPEN_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
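The FlushLargeStoresPolicy entry above records the fallback used when hbase.hregion.percolumnfamilyflush.size.lower.bound is not set: the per-family lower bound becomes the region's memstore flush size divided by its number of families. For hbase:meta the four families instantiated above (info, ns, rep_barrier, table) imply a 64 MB region flush size, since 64 MB / 4 = 16 MB, which matches the "(16.0 M)" printed here and the flushSizeLowerBound=16777216 (16 MiB) reported when the region opens in the next entry. The 64 MB figure is inferred from these logged values, not stated directly in the log.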
2024-12-07T13:26:21,822 DEBUG [RS_OPEN_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-07T13:26:21,823 INFO [RS_OPEN_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=862917, jitterRate=0.09725631773471832}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-07T13:26:21,823 DEBUG [RS_OPEN_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-07T13:26:21,824 DEBUG [RS_OPEN_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733577981809Writing region info on filesystem at 1733577981809Initializing all the Stores at 1733577981810 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733577981810Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733577981811 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733577981811Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733577981811Cleaning up temporary data from old regions at 1733577981821 (+10 ms)Running coprocessor post-open hooks at 1733577981823 (+2 ms)Region opened successfully at 1733577981823 2024-12-07T13:26:21,825 INFO [RS_OPEN_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733577981787 2024-12-07T13:26:21,827 DEBUG [RS_OPEN_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-07T13:26:21,827 INFO [RS_OPEN_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-07T13:26:21,828 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, 
regionLocation=c7c455b68129,34499,1733577980811 2024-12-07T13:26:21,828 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as c7c455b68129,34499,1733577980811, state=OPEN 2024-12-07T13:26:21,923 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34499-0x1000074c18e0001, quorum=127.0.0.1:60915, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-07T13:26:21,923 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33653-0x1000074c18e0000, quorum=127.0.0.1:60915, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-07T13:26:21,923 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=c7c455b68129,34499,1733577980811 2024-12-07T13:26:21,923 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-07T13:26:21,923 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-07T13:26:21,930 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-07T13:26:21,930 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=c7c455b68129,34499,1733577980811 in 289 msec 2024-12-07T13:26:21,934 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-07T13:26:21,934 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 726 msec 2024-12-07T13:26:21,935 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-07T13:26:21,935 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-07T13:26:21,937 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-07T13:26:21,937 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=c7c455b68129,34499,1733577980811, seqNum=-1] 2024-12-07T13:26:21,937 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T13:26:21,939 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:60551, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T13:26:21,945 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 785 msec 2024-12-07T13:26:21,945 INFO [master/c7c455b68129:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733577981945, completionTime=-1 2024-12-07T13:26:21,945 INFO 
[master/c7c455b68129:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-12-07T13:26:21,945 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-12-07T13:26:21,947 INFO [master/c7c455b68129:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-12-07T13:26:21,947 INFO [master/c7c455b68129:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733578041947 2024-12-07T13:26:21,947 INFO [master/c7c455b68129:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733578101947 2024-12-07T13:26:21,947 INFO [master/c7c455b68129:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 1 msec 2024-12-07T13:26:21,947 INFO [master/c7c455b68129:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c7c455b68129,33653,1733577980629-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-07T13:26:21,948 INFO [master/c7c455b68129:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c7c455b68129,33653,1733577980629-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-07T13:26:21,948 INFO [master/c7c455b68129:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c7c455b68129,33653,1733577980629-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-07T13:26:21,948 INFO [master/c7c455b68129:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-c7c455b68129:33653, period=300000, unit=MILLISECONDS is enabled. 2024-12-07T13:26:21,948 INFO [master/c7c455b68129:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-07T13:26:21,948 INFO [master/c7c455b68129:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-12-07T13:26:21,950 DEBUG [master/c7c455b68129:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-07T13:26:21,951 INFO [master/c7c455b68129:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.072sec 2024-12-07T13:26:21,951 INFO [master/c7c455b68129:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-07T13:26:21,951 INFO [master/c7c455b68129:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-07T13:26:21,951 INFO [master/c7c455b68129:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-07T13:26:21,951 INFO [master/c7c455b68129:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
2024-12-07T13:26:21,951 INFO [master/c7c455b68129:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-07T13:26:21,951 INFO [master/c7c455b68129:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c7c455b68129,33653,1733577980629-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-07T13:26:21,951 INFO [master/c7c455b68129:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c7c455b68129,33653,1733577980629-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-07T13:26:21,954 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-07T13:26:21,954 INFO [master/c7c455b68129:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-07T13:26:21,954 INFO [master/c7c455b68129:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c7c455b68129,33653,1733577980629-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-07T13:26:22,032 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3d0375c5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T13:26:22,033 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request c7c455b68129,33653,-1 for getting cluster id 2024-12-07T13:26:22,033 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-07T13:26:22,035 DEBUG [HMaster-EventLoopGroup-12-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '246f049c-9c8e-4144-bfc2-441a448be4f3' 2024-12-07T13:26:22,035 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-07T13:26:22,036 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "246f049c-9c8e-4144-bfc2-441a448be4f3" 2024-12-07T13:26:22,036 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@40d4dddf, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T13:26:22,036 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [c7c455b68129,33653,-1] 2024-12-07T13:26:22,036 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-07T13:26:22,037 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T13:26:22,038 INFO [HMaster-EventLoopGroup-12-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:34838, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-07T13:26:22,039 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7584a18a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, 
writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T13:26:22,040 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-07T13:26:22,041 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=c7c455b68129,34499,1733577980811, seqNum=-1] 2024-12-07T13:26:22,042 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T13:26:22,043 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:52942, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T13:26:22,045 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=c7c455b68129,33653,1733577980629 2024-12-07T13:26:22,045 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T13:26:22,048 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-12-07T13:26:22,049 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-12-07T13:26:22,050 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.AsyncConnectionImpl(321): The fetched master address is c7c455b68129,33653,1733577980629 2024-12-07T13:26:22,050 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@243373e2 2024-12-07T13:26:22,050 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-07T13:26:22,051 INFO [HMaster-EventLoopGroup-12-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:34852, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-07T13:26:22,052 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33653 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-12-07T13:26:22,052 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33653 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
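The two TableDescriptorChecker warnings above fire because the test runs with a deliberately tiny region size and memstore flush size (hbase.hregion.max.filesize=786432 and hbase.hregion.memstore.flush.size=8192) so that flushes and log rolls happen quickly. As a rough, hypothetical sketch only (not necessarily how this test wires it up; the same warnings can also come from site-level settings rather than the descriptor), equally small per-table overrides can be expressed through the client API when creating the table named in the request logged below:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class SmallTableSketch {
        public static void main(String[] args) throws Exception {
            try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
                 Admin admin = conn.getAdmin()) {
                TableDescriptor td = TableDescriptorBuilder
                    .newBuilder(TableName.valueOf("TestLogRolling-testCompactionRecordDoesntBlockRolling"))
                    .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
                    // Values this far below the sanity thresholds trigger the
                    // TableDescriptorChecker MAX_FILESIZE / MEMSTORE_FLUSHSIZE warnings.
                    .setMaxFileSize(786432L)      // ~768 KB before a split is considered
                    .setMemStoreFlushSize(8192L)  // 8 KB memstore flush size
                    .build();
                admin.createTable(td);
            }
        }
    }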
2024-12-07T13:26:22,052 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33653 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.3 create 'TestLogRolling-testCompactionRecordDoesntBlockRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-07T13:26:22,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33653 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-12-07T13:26:22,055 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-12-07T13:26:22,055 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T13:26:22,055 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33653 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.3 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testCompactionRecordDoesntBlockRolling" procId is: 4 2024-12-07T13:26:22,056 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-07T13:26:22,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33653 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-07T13:26:22,061 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33687 is added to blk_1073741835_1011 (size=405) 2024-12-07T13:26:22,062 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34847 is added to blk_1073741835_1011 (size=405) 2024-12-07T13:26:22,063 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => b808239c370e78b82859be5e5b36fcc7, NAME => 'TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733577982051.b808239c370e78b82859be5e5b36fcc7.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testCompactionRecordDoesntBlockRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:35389/user/jenkins/test-data/6ae24e44-b208-ee4e-212c-fa697bebf49a 2024-12-07T13:26:22,068 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33687 is added to blk_1073741836_1012 (size=88) 2024-12-07T13:26:22,068 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:34847 is added to blk_1073741836_1012 (size=88) 2024-12-07T13:26:22,069 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733577982051.b808239c370e78b82859be5e5b36fcc7.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T13:26:22,069 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1722): Closing b808239c370e78b82859be5e5b36fcc7, disabling compactions & flushes 2024-12-07T13:26:22,069 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733577982051.b808239c370e78b82859be5e5b36fcc7. 2024-12-07T13:26:22,069 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733577982051.b808239c370e78b82859be5e5b36fcc7. 2024-12-07T13:26:22,069 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733577982051.b808239c370e78b82859be5e5b36fcc7. after waiting 0 ms 2024-12-07T13:26:22,069 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733577982051.b808239c370e78b82859be5e5b36fcc7. 2024-12-07T13:26:22,069 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733577982051.b808239c370e78b82859be5e5b36fcc7. 2024-12-07T13:26:22,069 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for b808239c370e78b82859be5e5b36fcc7: Waiting for close lock at 1733577982069Disabling compacts and flushes for region at 1733577982069Disabling writes for close at 1733577982069Writing region close event to WAL at 1733577982069Closed at 1733577982069 2024-12-07T13:26:22,070 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_ADD_TO_META 2024-12-07T13:26:22,071 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733577982051.b808239c370e78b82859be5e5b36fcc7.","families":{"info":[{"qualifier":"regioninfo","vlen":87,"tag":[],"timestamp":"1733577982071"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733577982071"}]},"ts":"1733577982071"} 2024-12-07T13:26:22,073 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
2024-12-07T13:26:22,074 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-07T13:26:22,074 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733577982074"}]},"ts":"1733577982074"} 2024-12-07T13:26:22,077 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testCompactionRecordDoesntBlockRolling, state=ENABLING in hbase:meta 2024-12-07T13:26:22,077 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=b808239c370e78b82859be5e5b36fcc7, ASSIGN}] 2024-12-07T13:26:22,079 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=b808239c370e78b82859be5e5b36fcc7, ASSIGN 2024-12-07T13:26:22,081 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=b808239c370e78b82859be5e5b36fcc7, ASSIGN; state=OFFLINE, location=c7c455b68129,34499,1733577980811; forceNewPlan=false, retain=false 2024-12-07T13:26:22,231 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=b808239c370e78b82859be5e5b36fcc7, regionState=OPENING, regionLocation=c7c455b68129,34499,1733577980811 2024-12-07T13:26:22,234 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-13-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=b808239c370e78b82859be5e5b36fcc7, ASSIGN because future has completed 2024-12-07T13:26:22,234 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure b808239c370e78b82859be5e5b36fcc7, server=c7c455b68129,34499,1733577980811}] 2024-12-07T13:26:22,392 INFO [RS_OPEN_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733577982051.b808239c370e78b82859be5e5b36fcc7. 
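Once the CreateTableProcedure and its ASSIGN/OpenRegionProcedure subprocedures finish in the entries that follow (the table is marked ENABLED in hbase:meta and pid=4 completes), a client can confirm the table is usable. A minimal, hypothetical check using standard Admin calls (table name copied from the log; connection setup assumed):

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class TableReadyCheck {
        public static void main(String[] args) throws Exception {
            TableName tn = TableName.valueOf("TestLogRolling-testCompactionRecordDoesntBlockRolling");
            try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
                 Admin admin = conn.getAdmin()) {
                // Both calls should return true once the procedure framework has
                // opened the region and flipped the table state to ENABLED.
                System.out.println("available: " + admin.isTableAvailable(tn));
                System.out.println("enabled:   " + admin.isTableEnabled(tn));
            }
        }
    }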
2024-12-07T13:26:22,392 DEBUG [RS_OPEN_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => b808239c370e78b82859be5e5b36fcc7, NAME => 'TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733577982051.b808239c370e78b82859be5e5b36fcc7.', STARTKEY => '', ENDKEY => ''} 2024-12-07T13:26:22,392 DEBUG [RS_OPEN_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testCompactionRecordDoesntBlockRolling b808239c370e78b82859be5e5b36fcc7 2024-12-07T13:26:22,392 DEBUG [RS_OPEN_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733577982051.b808239c370e78b82859be5e5b36fcc7.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T13:26:22,392 DEBUG [RS_OPEN_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for b808239c370e78b82859be5e5b36fcc7 2024-12-07T13:26:22,392 DEBUG [RS_OPEN_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for b808239c370e78b82859be5e5b36fcc7 2024-12-07T13:26:22,394 INFO [StoreOpener-b808239c370e78b82859be5e5b36fcc7-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region b808239c370e78b82859be5e5b36fcc7 2024-12-07T13:26:22,396 INFO [StoreOpener-b808239c370e78b82859be5e5b36fcc7-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region b808239c370e78b82859be5e5b36fcc7 columnFamilyName info 2024-12-07T13:26:22,396 DEBUG [StoreOpener-b808239c370e78b82859be5e5b36fcc7-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T13:26:22,396 INFO [StoreOpener-b808239c370e78b82859be5e5b36fcc7-1 {}] regionserver.HStore(327): Store=b808239c370e78b82859be5e5b36fcc7/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T13:26:22,397 DEBUG [RS_OPEN_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for b808239c370e78b82859be5e5b36fcc7 2024-12-07T13:26:22,397 DEBUG [RS_OPEN_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:35389/user/jenkins/test-data/6ae24e44-b208-ee4e-212c-fa697bebf49a/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/b808239c370e78b82859be5e5b36fcc7 2024-12-07T13:26:22,398 DEBUG [RS_OPEN_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35389/user/jenkins/test-data/6ae24e44-b208-ee4e-212c-fa697bebf49a/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/b808239c370e78b82859be5e5b36fcc7 2024-12-07T13:26:22,398 DEBUG [RS_OPEN_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for b808239c370e78b82859be5e5b36fcc7 2024-12-07T13:26:22,399 DEBUG [RS_OPEN_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for b808239c370e78b82859be5e5b36fcc7 2024-12-07T13:26:22,401 DEBUG [RS_OPEN_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for b808239c370e78b82859be5e5b36fcc7 2024-12-07T13:26:22,404 DEBUG [RS_OPEN_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35389/user/jenkins/test-data/6ae24e44-b208-ee4e-212c-fa697bebf49a/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/b808239c370e78b82859be5e5b36fcc7/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-07T13:26:22,404 INFO [RS_OPEN_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened b808239c370e78b82859be5e5b36fcc7; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=848134, jitterRate=0.07845848798751831}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-07T13:26:22,404 DEBUG [RS_OPEN_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for b808239c370e78b82859be5e5b36fcc7 2024-12-07T13:26:22,405 DEBUG [RS_OPEN_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for b808239c370e78b82859be5e5b36fcc7: Running coprocessor pre-open hook at 1733577982393Writing region info on filesystem at 1733577982393Initializing all the Stores at 1733577982394 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733577982394Cleaning up temporary data from old regions at 1733577982399 (+5 ms)Running coprocessor post-open hooks at 1733577982404 (+5 ms)Region opened successfully at 1733577982405 (+1 ms) 2024-12-07T13:26:22,406 INFO [RS_OPEN_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733577982051.b808239c370e78b82859be5e5b36fcc7., pid=6, masterSystemTime=1733577982388 2024-12-07T13:26:22,408 DEBUG [RS_OPEN_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open 
deploy task for TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733577982051.b808239c370e78b82859be5e5b36fcc7. 2024-12-07T13:26:22,408 INFO [RS_OPEN_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733577982051.b808239c370e78b82859be5e5b36fcc7. 2024-12-07T13:26:22,409 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=b808239c370e78b82859be5e5b36fcc7, regionState=OPEN, openSeqNum=2, regionLocation=c7c455b68129,34499,1733577980811 2024-12-07T13:26:22,411 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-13-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure b808239c370e78b82859be5e5b36fcc7, server=c7c455b68129,34499,1733577980811 because future has completed 2024-12-07T13:26:22,415 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-12-07T13:26:22,416 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure b808239c370e78b82859be5e5b36fcc7, server=c7c455b68129,34499,1733577980811 in 179 msec 2024-12-07T13:26:22,418 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-12-07T13:26:22,418 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=b808239c370e78b82859be5e5b36fcc7, ASSIGN in 339 msec 2024-12-07T13:26:22,419 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-07T13:26:22,419 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733577982419"}]},"ts":"1733577982419"} 2024-12-07T13:26:22,421 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testCompactionRecordDoesntBlockRolling, state=ENABLED in hbase:meta 2024-12-07T13:26:22,422 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_POST_OPERATION 2024-12-07T13:26:22,424 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 370 msec 2024-12-07T13:26:22,549 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,42863,1733577894737/c7c455b68129%2C42863%2C1733577894737.1733577894972 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:26:22,549 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,43699,1733577893481/c7c455b68129%2C43699%2C1733577893481.meta.1733577894498.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:26:23,549 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,43699,1733577893481/c7c455b68129%2C43699%2C1733577893481.meta.1733577894498.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:26:23,549 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,42863,1733577894737/c7c455b68129%2C42863%2C1733577894737.1733577894972 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:26:23,933 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-07T13:26:23,936 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T13:26:23,936 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T13:26:23,937 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T13:26:23,937 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T13:26:23,937 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T13:26:23,937 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T13:26:23,952 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T13:26:23,952 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T13:26:23,952 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T13:26:23,953 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T13:26:23,953 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T13:26:23,953 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T13:26:23,955 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T13:26:23,955 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T13:26:23,956 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T13:26:23,957 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T13:26:24,551 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,42863,1733577894737/c7c455b68129%2C42863%2C1733577894737.1733577894972 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:26:24,551 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,43699,1733577893481/c7c455b68129%2C43699%2C1733577893481.meta.1733577894498.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:26:25,552 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,43699,1733577893481/c7c455b68129%2C43699%2C1733577893481.meta.1733577894498.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:26:25,552 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,42863,1733577894737/c7c455b68129%2C42863%2C1733577894737.1733577894972 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:26:26,553 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,42863,1733577894737/c7c455b68129%2C42863%2C1733577894737.1733577894972 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:26:26,553 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,43699,1733577893481/c7c455b68129%2C43699%2C1733577893481.meta.1733577894498.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:26:27,283 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-12-07T13:26:27,283 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testCompactionRecordDoesntBlockRolling' 2024-12-07T13:26:27,554 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,42863,1733577894737/c7c455b68129%2C42863%2C1733577894737.1733577894972 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:26:27,554 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,43699,1733577893481/c7c455b68129%2C43699%2C1733577893481.meta.1733577894498.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-07T13:26:28,427 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-12-07T13:26:28,427 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-12-07T13:26:28,429 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-07T13:26:28,429 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-12-07T13:26:28,429 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-12-07T13:26:28,430 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-12-07T13:26:28,431 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-12-07T13:26:28,431 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling Metrics about Tables on a single HBase RegionServer 2024-12-07T13:26:28,555 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,42863,1733577894737/c7c455b68129%2C42863%2C1733577894737.1733577894972 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:26:28,555 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,43699,1733577893481/c7c455b68129%2C43699%2C1733577893481.meta.1733577894498.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:26:29,557 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,43699,1733577893481/c7c455b68129%2C43699%2C1733577893481.meta.1733577894498.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:26:29,557 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,42863,1733577894737/c7c455b68129%2C42863%2C1733577894737.1733577894972 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:26:30,558 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,42863,1733577894737/c7c455b68129%2C42863%2C1733577894737.1733577894972 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:26:30,558 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,43699,1733577893481/c7c455b68129%2C43699%2C1733577893481.meta.1733577894498.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:26:31,559 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,43699,1733577893481/c7c455b68129%2C43699%2C1733577893481.meta.1733577894498.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:26:31,559 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,42863,1733577894737/c7c455b68129%2C42863%2C1733577894737.1733577894972 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-07T13:26:32,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33653 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-07T13:26:32,137 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-12-07T13:26:32,137 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testCompactionRecordDoesntBlockRolling,, stopping at row=TestLogRolling-testCompactionRecordDoesntBlockRolling ,, for max=2147483647 with caching=100 2024-12-07T13:26:32,143 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-12-07T13:26:32,143 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733577982051.b808239c370e78b82859be5e5b36fcc7. 2024-12-07T13:26:32,149 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testCompactionRecordDoesntBlockRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733577982051.b808239c370e78b82859be5e5b36fcc7., hostname=c7c455b68129,34499,1733577980811, seqNum=2] 2024-12-07T13:26:32,156 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33653 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.3 flush TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-12-07T13:26:32,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33653 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-12-07T13:26:32,162 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE 2024-12-07T13:26:32,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33653 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-07T13:26:32,164 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-07T13:26:32,165 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-07T13:26:32,328 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34499 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=8 2024-12-07T13:26:32,329 DEBUG [RS_FLUSH_OPERATIONS-regionserver/c7c455b68129:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733577982051.b808239c370e78b82859be5e5b36fcc7. 
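The entries above show the client locating the region for row0001 and then asking the master to flush the table: the master stores a FlushTableProcedure (pid=7), dispatches a FlushRegionProcedure (pid=8) to the region server, and the server executes it as a FlushRegionCallable. The following is a minimal, hypothetical sketch of driving the same write-then-flush sequence through the public HBase client API; it is not the test's actual code, and the column qualifier and value size are assumptions made purely for illustration.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class WriteThenFlushSketch {
  public static void main(String[] args) throws Exception {
    TableName name =
        TableName.valueOf("TestLogRolling-testCompactionRecordDoesntBlockRolling");
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(name);
         Admin admin = conn.getAdmin()) {
      // Write a single ~1 KB cell to row0001 in the 'info' family.
      // (Qualifier "q" and the 1 KB value are illustrative assumptions,
      // not values taken from the test.)
      Put put = new Put(Bytes.toBytes("row0001"));
      put.addColumn(Bytes.toBytes("info"), Bytes.toBytes("q"), new byte[1024]);
      table.put(put);

      // Ask the master to flush the table's memstores to HFiles. A request
      // like this is what surfaces in the log as a FlushTableProcedure with
      // a FlushRegionProcedure subprocedure run on the owning region server.
      admin.flush(name);
    }
  }
}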
2024-12-07T13:26:32,329 INFO [RS_FLUSH_OPERATIONS-regionserver/c7c455b68129:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2902): Flushing b808239c370e78b82859be5e5b36fcc7 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-12-07T13:26:32,350 DEBUG [RS_FLUSH_OPERATIONS-regionserver/c7c455b68129:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35389/user/jenkins/test-data/6ae24e44-b208-ee4e-212c-fa697bebf49a/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/b808239c370e78b82859be5e5b36fcc7/.tmp/info/00030acab59f402eb2b602b62fa1af4b is 1080, key is row0001/info:/1733577992150/Put/seqid=0 2024-12-07T13:26:32,355 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34847 is added to blk_1073741837_1013 (size=6033) 2024-12-07T13:26:32,355 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33687 is added to blk_1073741837_1013 (size=6033) 2024-12-07T13:26:32,355 INFO [RS_FLUSH_OPERATIONS-regionserver/c7c455b68129:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:35389/user/jenkins/test-data/6ae24e44-b208-ee4e-212c-fa697bebf49a/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/b808239c370e78b82859be5e5b36fcc7/.tmp/info/00030acab59f402eb2b602b62fa1af4b 2024-12-07T13:26:32,361 DEBUG [RS_FLUSH_OPERATIONS-regionserver/c7c455b68129:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35389/user/jenkins/test-data/6ae24e44-b208-ee4e-212c-fa697bebf49a/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/b808239c370e78b82859be5e5b36fcc7/.tmp/info/00030acab59f402eb2b602b62fa1af4b as hdfs://localhost:35389/user/jenkins/test-data/6ae24e44-b208-ee4e-212c-fa697bebf49a/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/b808239c370e78b82859be5e5b36fcc7/info/00030acab59f402eb2b602b62fa1af4b 2024-12-07T13:26:32,367 INFO [RS_FLUSH_OPERATIONS-regionserver/c7c455b68129:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35389/user/jenkins/test-data/6ae24e44-b208-ee4e-212c-fa697bebf49a/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/b808239c370e78b82859be5e5b36fcc7/info/00030acab59f402eb2b602b62fa1af4b, entries=1, sequenceid=5, filesize=5.9 K 2024-12-07T13:26:32,368 INFO [RS_FLUSH_OPERATIONS-regionserver/c7c455b68129:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for b808239c370e78b82859be5e5b36fcc7 in 39ms, sequenceid=5, compaction requested=false 2024-12-07T13:26:32,368 DEBUG [RS_FLUSH_OPERATIONS-regionserver/c7c455b68129:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2603): Flush status journal for b808239c370e78b82859be5e5b36fcc7: 2024-12-07T13:26:32,368 DEBUG [RS_FLUSH_OPERATIONS-regionserver/c7c455b68129:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733577982051.b808239c370e78b82859be5e5b36fcc7. 
2024-12-07T13:26:32,369 DEBUG [RS_FLUSH_OPERATIONS-regionserver/c7c455b68129:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=8 2024-12-07T13:26:32,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33653 {}] master.HMaster(4169): Remote procedure done, pid=8 2024-12-07T13:26:32,374 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-12-07T13:26:32,374 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 207 msec 2024-12-07T13:26:32,377 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 217 msec 2024-12-07T13:26:32,560 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,43699,1733577893481/c7c455b68129%2C43699%2C1733577893481.meta.1733577894498.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-07T13:26:32,560 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,42863,1733577894737/c7c455b68129%2C42863%2C1733577894737.1733577894972 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:26:33,560 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,42863,1733577894737/c7c455b68129%2C42863%2C1733577894737.1733577894972 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:26:33,560 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,43699,1733577893481/c7c455b68129%2C43699%2C1733577893481.meta.1733577894498.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-07T13:26:34,562 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,43699,1733577893481/c7c455b68129%2C43699%2C1733577893481.meta.1733577894498.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:26:34,562 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,42863,1733577894737/c7c455b68129%2C42863%2C1733577894737.1733577894972 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:26:35,562 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,42863,1733577894737/c7c455b68129%2C42863%2C1733577894737.1733577894972 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-07T13:26:35,562 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,43699,1733577893481/c7c455b68129%2C43699%2C1733577893481.meta.1733577894498.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:26:36,563 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,43699,1733577893481/c7c455b68129%2C43699%2C1733577893481.meta.1733577894498.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:26:36,563 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,42863,1733577894737/c7c455b68129%2C42863%2C1733577894737.1733577894972 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-07T13:26:37,564 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,42863,1733577894737/c7c455b68129%2C42863%2C1733577894737.1733577894972 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:26:37,564 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,43699,1733577893481/c7c455b68129%2C43699%2C1733577893481.meta.1733577894498.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:26:38,566 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,43699,1733577893481/c7c455b68129%2C43699%2C1733577893481.meta.1733577894498.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-07T13:26:38,566 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,42863,1733577894737/c7c455b68129%2C42863%2C1733577894737.1733577894972 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:26:39,567 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,42863,1733577894737/c7c455b68129%2C42863%2C1733577894737.1733577894972 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:26:39,567 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,43699,1733577893481/c7c455b68129%2C43699%2C1733577893481.meta.1733577894498.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-07T13:26:40,568 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,43699,1733577893481/c7c455b68129%2C43699%2C1733577893481.meta.1733577894498.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:26:40,568 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,42863,1733577894737/c7c455b68129%2C42863%2C1733577894737.1733577894972 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:26:41,569 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,43699,1733577893481/c7c455b68129%2C43699%2C1733577893481.meta.1733577894498.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-07T13:26:41,569 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,42863,1733577894737/c7c455b68129%2C42863%2C1733577894737.1733577894972 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:26:41,571 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=2 on file=hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,42863,1733577894737/c7c455b68129%2C42863%2C1733577894737.1733577894972 after 68091ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor205.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T13:26:41,571 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=2 on file=hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,43699,1733577893481/c7c455b68129%2C43699%2C1733577893481.meta.1733577894498.meta after 68083ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor205.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-07T13:26:42,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33653 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-07T13:26:42,218 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-12-07T13:26:42,226 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33653 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.3 flush TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-12-07T13:26:42,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33653 {}] procedure2.ProcedureExecutor(1139): Stored pid=9, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-12-07T13:26:42,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33653 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=9 2024-12-07T13:26:42,232 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=9, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE 2024-12-07T13:26:42,234 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=9, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-07T13:26:42,234 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-07T13:26:42,388 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34499 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=10 2024-12-07T13:26:42,388 DEBUG [RS_FLUSH_OPERATIONS-regionserver/c7c455b68129:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733577982051.b808239c370e78b82859be5e5b36fcc7. 
2024-12-07T13:26:42,388 INFO [RS_FLUSH_OPERATIONS-regionserver/c7c455b68129:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(2902): Flushing b808239c370e78b82859be5e5b36fcc7 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB
2024-12-07T13:26:42,393 DEBUG [RS_FLUSH_OPERATIONS-regionserver/c7c455b68129:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35389/user/jenkins/test-data/6ae24e44-b208-ee4e-212c-fa697bebf49a/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/b808239c370e78b82859be5e5b36fcc7/.tmp/info/78504fb9f7a14b34964eff0cf37d0c45 is 1080, key is row0002/info:/1733578002221/Put/seqid=0
2024-12-07T13:26:42,397 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33687 is added to blk_1073741838_1014 (size=6033)
2024-12-07T13:26:42,397 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34847 is added to blk_1073741838_1014 (size=6033)
2024-12-07T13:26:42,398 INFO [RS_FLUSH_OPERATIONS-regionserver/c7c455b68129:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=9 (bloomFilter=true), to=hdfs://localhost:35389/user/jenkins/test-data/6ae24e44-b208-ee4e-212c-fa697bebf49a/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/b808239c370e78b82859be5e5b36fcc7/.tmp/info/78504fb9f7a14b34964eff0cf37d0c45
2024-12-07T13:26:42,404 DEBUG [RS_FLUSH_OPERATIONS-regionserver/c7c455b68129:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35389/user/jenkins/test-data/6ae24e44-b208-ee4e-212c-fa697bebf49a/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/b808239c370e78b82859be5e5b36fcc7/.tmp/info/78504fb9f7a14b34964eff0cf37d0c45 as hdfs://localhost:35389/user/jenkins/test-data/6ae24e44-b208-ee4e-212c-fa697bebf49a/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/b808239c370e78b82859be5e5b36fcc7/info/78504fb9f7a14b34964eff0cf37d0c45
2024-12-07T13:26:42,409 INFO [RS_FLUSH_OPERATIONS-regionserver/c7c455b68129:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35389/user/jenkins/test-data/6ae24e44-b208-ee4e-212c-fa697bebf49a/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/b808239c370e78b82859be5e5b36fcc7/info/78504fb9f7a14b34964eff0cf37d0c45, entries=1, sequenceid=9, filesize=5.9 K
2024-12-07T13:26:42,410 INFO [RS_FLUSH_OPERATIONS-regionserver/c7c455b68129:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for b808239c370e78b82859be5e5b36fcc7 in 22ms, sequenceid=9, compaction requested=false
2024-12-07T13:26:42,411 DEBUG [RS_FLUSH_OPERATIONS-regionserver/c7c455b68129:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(2603): Flush status journal for b808239c370e78b82859be5e5b36fcc7:
2024-12-07T13:26:42,411 DEBUG [RS_FLUSH_OPERATIONS-regionserver/c7c455b68129:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733577982051.b808239c370e78b82859be5e5b36fcc7.
2024-12-07T13:26:42,411 DEBUG [RS_FLUSH_OPERATIONS-regionserver/c7c455b68129:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=10
2024-12-07T13:26:42,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33653 {}] master.HMaster(4169): Remote procedure done, pid=10
2024-12-07T13:26:42,414 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=10, resume processing ppid=9
2024-12-07T13:26:42,415 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, ppid=9, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 178 msec
2024-12-07T13:26:42,416 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 188 msec
2024-12-07T13:26:42,572 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,43699,1733577893481/c7c455b68129%2C43699%2C1733577893481.meta.1733577894498.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
[The WARN util.RecoverLeaseFSUtils(258) "Failed invocation" entry above recurs roughly once per second from 2024-12-07T13:26:42,572 through 2024-12-07T13:26:51,581, alternating between hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,43699,1733577893481/c7c455b68129%2C43699%2C1733577893481.meta.1733577894498.meta and hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,42863,1733577894737/c7c455b68129%2C42863%2C1733577894737.1733577894972, each time with the identical java.lang.reflect.InvocationTargetException / java.io.IOException: Filesystem closed stack trace shown above.]
2024-12-07T13:26:50,613 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details.
2024-12-07T13:26:52,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33653 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=9
2024-12-07T13:26:52,276 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed
2024-12-07T13:26:52,280 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor c7c455b68129%2C34499%2C1733577980811.1733578012280
2024-12-07T13:26:52,286 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-07T13:26:52,286 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-07T13:26:52,286 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-07T13:26:52,286 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-07T13:26:52,287 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-07T13:26:52,287 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/6ae24e44-b208-ee4e-212c-fa697bebf49a/WALs/c7c455b68129,34499,1733577980811/c7c455b68129%2C34499%2C1733577980811.1733577981421 with entries=8, filesize=5.41 KB; new WAL /user/jenkins/test-data/6ae24e44-b208-ee4e-212c-fa697bebf49a/WALs/c7c455b68129,34499,1733577980811/c7c455b68129%2C34499%2C1733577980811.1733578012280
2024-12-07T13:26:52,287 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45911:45911),(127.0.0.1/127.0.0.1:39617:39617)]
2024-12-07T13:26:52,287 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:35389/user/jenkins/test-data/6ae24e44-b208-ee4e-212c-fa697bebf49a/WALs/c7c455b68129,34499,1733577980811/c7c455b68129%2C34499%2C1733577980811.1733577981421 is not closed yet, will try archiving it next time
2024-12-07T13:26:52,288 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34847 is added to blk_1073741833_1009 (size=5546)
2024-12-07T13:26:52,288 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33687 is added to blk_1073741833_1009 (size=5546)
2024-12-07T13:26:52,288 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33653 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.3 flush TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-12-07T13:26:52,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33653 {}] procedure2.ProcedureExecutor(1139): Stored pid=11, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-12-07T13:26:52,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33653 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=11
2024-12-07T13:26:52,291 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=11, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE
2024-12-07T13:26:52,291 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=11, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-12-07T13:26:52,292 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=12, ppid=11, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-12-07T13:26:52,445 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34499 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=12
2024-12-07T13:26:52,446 DEBUG [RS_FLUSH_OPERATIONS-regionserver/c7c455b68129:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733577982051.b808239c370e78b82859be5e5b36fcc7.
2024-12-07T13:26:52,446 INFO [RS_FLUSH_OPERATIONS-regionserver/c7c455b68129:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(2902): Flushing b808239c370e78b82859be5e5b36fcc7 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB
2024-12-07T13:26:52,456 DEBUG [RS_FLUSH_OPERATIONS-regionserver/c7c455b68129:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35389/user/jenkins/test-data/6ae24e44-b208-ee4e-212c-fa697bebf49a/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/b808239c370e78b82859be5e5b36fcc7/.tmp/info/a09487a68514449b8143e8c6d5da15d3 is 1080, key is row0003/info:/1733578012278/Put/seqid=0
2024-12-07T13:26:52,461 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33687 is added to blk_1073741840_1016 (size=6033)
2024-12-07T13:26:52,461 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34847 is added to blk_1073741840_1016 (size=6033)
2024-12-07T13:26:52,462 INFO [RS_FLUSH_OPERATIONS-regionserver/c7c455b68129:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:35389/user/jenkins/test-data/6ae24e44-b208-ee4e-212c-fa697bebf49a/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/b808239c370e78b82859be5e5b36fcc7/.tmp/info/a09487a68514449b8143e8c6d5da15d3
2024-12-07T13:26:52,470 DEBUG [RS_FLUSH_OPERATIONS-regionserver/c7c455b68129:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35389/user/jenkins/test-data/6ae24e44-b208-ee4e-212c-fa697bebf49a/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/b808239c370e78b82859be5e5b36fcc7/.tmp/info/a09487a68514449b8143e8c6d5da15d3 as hdfs://localhost:35389/user/jenkins/test-data/6ae24e44-b208-ee4e-212c-fa697bebf49a/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/b808239c370e78b82859be5e5b36fcc7/info/a09487a68514449b8143e8c6d5da15d3
2024-12-07T13:26:52,474 INFO [RS_FLUSH_OPERATIONS-regionserver/c7c455b68129:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35389/user/jenkins/test-data/6ae24e44-b208-ee4e-212c-fa697bebf49a/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/b808239c370e78b82859be5e5b36fcc7/info/a09487a68514449b8143e8c6d5da15d3, entries=1, sequenceid=13, filesize=5.9 K
2024-12-07T13:26:52,475 INFO [RS_FLUSH_OPERATIONS-regionserver/c7c455b68129:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for b808239c370e78b82859be5e5b36fcc7 in 29ms, sequenceid=13, compaction requested=true
b808239c370e78b82859be5e5b36fcc7 in 29ms, sequenceid=13, compaction requested=true 2024-12-07T13:26:52,475 DEBUG [RS_FLUSH_OPERATIONS-regionserver/c7c455b68129:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(2603): Flush status journal for b808239c370e78b82859be5e5b36fcc7: 2024-12-07T13:26:52,476 DEBUG [RS_FLUSH_OPERATIONS-regionserver/c7c455b68129:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733577982051.b808239c370e78b82859be5e5b36fcc7. 2024-12-07T13:26:52,476 DEBUG [RS_FLUSH_OPERATIONS-regionserver/c7c455b68129:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=12 2024-12-07T13:26:52,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33653 {}] master.HMaster(4169): Remote procedure done, pid=12 2024-12-07T13:26:52,479 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=12, resume processing ppid=11 2024-12-07T13:26:52,479 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=12, ppid=11, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 186 msec 2024-12-07T13:26:52,481 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 192 msec 2024-12-07T13:26:52,581 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,42863,1733577894737/c7c455b68129%2C42863%2C1733577894737.1733577894972 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:26:52,581 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,43699,1733577893481/c7c455b68129%2C43699%2C1733577893481.meta.1733577894498.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:26:53,583 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,42863,1733577894737/c7c455b68129%2C42863%2C1733577894737.1733577894972 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:26:53,583 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,43699,1733577893481/c7c455b68129%2C43699%2C1733577893481.meta.1733577894498.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:26:54,584 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,43699,1733577893481/c7c455b68129%2C43699%2C1733577893481.meta.1733577894498.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:26:54,584 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,42863,1733577894737/c7c455b68129%2C42863%2C1733577894737.1733577894972 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:26:55,585 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,43699,1733577893481/c7c455b68129%2C43699%2C1733577893481.meta.1733577894498.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:26:55,585 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,42863,1733577894737/c7c455b68129%2C42863%2C1733577894737.1733577894972 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:26:56,586 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,42863,1733577894737/c7c455b68129%2C42863%2C1733577894737.1733577894972 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:26:56,586 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,43699,1733577893481/c7c455b68129%2C43699%2C1733577893481.meta.1733577894498.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:26:57,587 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,42863,1733577894737/c7c455b68129%2C42863%2C1733577894737.1733577894972 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:26:57,587 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,43699,1733577893481/c7c455b68129%2C43699%2C1733577893481.meta.1733577894498.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:26:58,588 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,42863,1733577894737/c7c455b68129%2C42863%2C1733577894737.1733577894972 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:26:58,588 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,43699,1733577893481/c7c455b68129%2C43699%2C1733577893481.meta.1733577894498.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:26:59,588 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,42863,1733577894737/c7c455b68129%2C42863%2C1733577894737.1733577894972 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:26:59,588 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,43699,1733577893481/c7c455b68129%2C43699%2C1733577893481.meta.1733577894498.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:27:00,589 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,42863,1733577894737/c7c455b68129%2C42863%2C1733577894737.1733577894972 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:27:00,589 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,43699,1733577893481/c7c455b68129%2C43699%2C1733577893481.meta.1733577894498.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:27:01,589 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,42863,1733577894737/c7c455b68129%2C42863%2C1733577894737.1733577894972 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:27:01,589 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,43699,1733577893481/c7c455b68129%2C43699%2C1733577893481.meta.1733577894498.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-07T13:27:02,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33653 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=11 2024-12-07T13:27:02,366 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-12-07T13:27:02,366 DEBUG [Time-limited test {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-07T13:27:02,367 DEBUG [Time-limited test {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 18099 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-07T13:27:02,368 DEBUG [Time-limited test {}] regionserver.HStore(1541): b808239c370e78b82859be5e5b36fcc7/info is initiating minor compaction (all files) 2024-12-07T13:27:02,368 INFO [Time-limited test {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-07T13:27:02,368 INFO [Time-limited test {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-07T13:27:02,368 INFO [Time-limited test {}] regionserver.HRegion(2416): Starting compaction of b808239c370e78b82859be5e5b36fcc7/info in TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733577982051.b808239c370e78b82859be5e5b36fcc7. 2024-12-07T13:27:02,368 INFO [Time-limited test {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35389/user/jenkins/test-data/6ae24e44-b208-ee4e-212c-fa697bebf49a/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/b808239c370e78b82859be5e5b36fcc7/info/00030acab59f402eb2b602b62fa1af4b, hdfs://localhost:35389/user/jenkins/test-data/6ae24e44-b208-ee4e-212c-fa697bebf49a/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/b808239c370e78b82859be5e5b36fcc7/info/78504fb9f7a14b34964eff0cf37d0c45, hdfs://localhost:35389/user/jenkins/test-data/6ae24e44-b208-ee4e-212c-fa697bebf49a/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/b808239c370e78b82859be5e5b36fcc7/info/a09487a68514449b8143e8c6d5da15d3] into tmpdir=hdfs://localhost:35389/user/jenkins/test-data/6ae24e44-b208-ee4e-212c-fa697bebf49a/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/b808239c370e78b82859be5e5b36fcc7/.tmp, totalSize=17.7 K 2024-12-07T13:27:02,369 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting 00030acab59f402eb2b602b62fa1af4b, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=5, earliestPutTs=1733577992150 2024-12-07T13:27:02,369 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting 78504fb9f7a14b34964eff0cf37d0c45, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=9, earliestPutTs=1733578002221 2024-12-07T13:27:02,370 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting a09487a68514449b8143e8c6d5da15d3, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1733578012278 2024-12-07T13:27:02,382 INFO [Time-limited test {}] throttle.PressureAwareThroughputController(145): b808239c370e78b82859be5e5b36fcc7#info#compaction#45 average throughput is 3.08 MB/second, slept 0 time(s) and 
total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-07T13:27:02,382 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35389/user/jenkins/test-data/6ae24e44-b208-ee4e-212c-fa697bebf49a/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/b808239c370e78b82859be5e5b36fcc7/.tmp/info/966f3ca319514787bdc798f61426a878 is 1080, key is row0001/info:/1733577992150/Put/seqid=0 2024-12-07T13:27:02,386 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34847 is added to blk_1073741841_1017 (size=8296) 2024-12-07T13:27:02,386 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33687 is added to blk_1073741841_1017 (size=8296) 2024-12-07T13:27:02,391 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35389/user/jenkins/test-data/6ae24e44-b208-ee4e-212c-fa697bebf49a/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/b808239c370e78b82859be5e5b36fcc7/.tmp/info/966f3ca319514787bdc798f61426a878 as hdfs://localhost:35389/user/jenkins/test-data/6ae24e44-b208-ee4e-212c-fa697bebf49a/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/b808239c370e78b82859be5e5b36fcc7/info/966f3ca319514787bdc798f61426a878 2024-12-07T13:27:02,397 INFO [Time-limited test {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in b808239c370e78b82859be5e5b36fcc7/info of b808239c370e78b82859be5e5b36fcc7 into 966f3ca319514787bdc798f61426a878(size=8.1 K), total size for store is 8.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-07T13:27:02,397 DEBUG [Time-limited test {}] regionserver.HRegion(2446): Compaction status journal for b808239c370e78b82859be5e5b36fcc7: 2024-12-07T13:27:02,399 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor c7c455b68129%2C34499%2C1733577980811.1733578022399 2024-12-07T13:27:02,405 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:27:02,405 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:27:02,405 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:27:02,405 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:27:02,405 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:27:02,405 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/6ae24e44-b208-ee4e-212c-fa697bebf49a/WALs/c7c455b68129,34499,1733577980811/c7c455b68129%2C34499%2C1733577980811.1733578012280 with entries=4, filesize=2.45 KB; new WAL /user/jenkins/test-data/6ae24e44-b208-ee4e-212c-fa697bebf49a/WALs/c7c455b68129,34499,1733577980811/c7c455b68129%2C34499%2C1733577980811.1733578022399 2024-12-07T13:27:02,406 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34847 is added to blk_1073741839_1015 (size=2520) 2024-12-07T13:27:02,407 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33687 is added to blk_1073741839_1015 (size=2520) 2024-12-07T13:27:02,409 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:35389/user/jenkins/test-data/6ae24e44-b208-ee4e-212c-fa697bebf49a/WALs/c7c455b68129,34499,1733577980811/c7c455b68129%2C34499%2C1733577980811.1733577981421 to 
hdfs://localhost:35389/user/jenkins/test-data/6ae24e44-b208-ee4e-212c-fa697bebf49a/oldWALs/c7c455b68129%2C34499%2C1733577980811.1733577981421 2024-12-07T13:27:02,413 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45911:45911),(127.0.0.1/127.0.0.1:39617:39617)] 2024-12-07T13:27:02,413 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33653 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.3 flush TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-12-07T13:27:02,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33653 {}] procedure2.ProcedureExecutor(1139): Stored pid=13, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-12-07T13:27:02,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33653 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=13 2024-12-07T13:27:02,415 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=13, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE 2024-12-07T13:27:02,416 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=13, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-07T13:27:02,416 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=14, ppid=13, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-07T13:27:02,551 INFO [master/c7c455b68129:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-12-07T13:27:02,551 INFO [master/c7c455b68129:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-12-07T13:27:02,569 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34499 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=14 2024-12-07T13:27:02,569 DEBUG [RS_FLUSH_OPERATIONS-regionserver/c7c455b68129:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733577982051.b808239c370e78b82859be5e5b36fcc7. 
2024-12-07T13:27:02,569 INFO [RS_FLUSH_OPERATIONS-regionserver/c7c455b68129:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(2902): Flushing b808239c370e78b82859be5e5b36fcc7 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-12-07T13:27:02,573 DEBUG [RS_FLUSH_OPERATIONS-regionserver/c7c455b68129:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35389/user/jenkins/test-data/6ae24e44-b208-ee4e-212c-fa697bebf49a/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/b808239c370e78b82859be5e5b36fcc7/.tmp/info/77e32a35390e41fca2ab1ff8eaf5c54b is 1080, key is row0000/info:/1733578022398/Put/seqid=0 2024-12-07T13:27:02,577 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34847 is added to blk_1073741843_1019 (size=6033) 2024-12-07T13:27:02,578 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33687 is added to blk_1073741843_1019 (size=6033) 2024-12-07T13:27:02,578 INFO [RS_FLUSH_OPERATIONS-regionserver/c7c455b68129:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=18 (bloomFilter=true), to=hdfs://localhost:35389/user/jenkins/test-data/6ae24e44-b208-ee4e-212c-fa697bebf49a/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/b808239c370e78b82859be5e5b36fcc7/.tmp/info/77e32a35390e41fca2ab1ff8eaf5c54b 2024-12-07T13:27:02,585 DEBUG [RS_FLUSH_OPERATIONS-regionserver/c7c455b68129:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35389/user/jenkins/test-data/6ae24e44-b208-ee4e-212c-fa697bebf49a/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/b808239c370e78b82859be5e5b36fcc7/.tmp/info/77e32a35390e41fca2ab1ff8eaf5c54b as hdfs://localhost:35389/user/jenkins/test-data/6ae24e44-b208-ee4e-212c-fa697bebf49a/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/b808239c370e78b82859be5e5b36fcc7/info/77e32a35390e41fca2ab1ff8eaf5c54b 2024-12-07T13:27:02,590 INFO [RS_FLUSH_OPERATIONS-regionserver/c7c455b68129:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35389/user/jenkins/test-data/6ae24e44-b208-ee4e-212c-fa697bebf49a/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/b808239c370e78b82859be5e5b36fcc7/info/77e32a35390e41fca2ab1ff8eaf5c54b, entries=1, sequenceid=18, filesize=5.9 K 2024-12-07T13:27:02,590 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,42863,1733577894737/c7c455b68129%2C42863%2C1733577894737.1733577894972 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:27:02,590 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,43699,1733577893481/c7c455b68129%2C43699%2C1733577893481.meta.1733577894498.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:27:02,591 INFO [RS_FLUSH_OPERATIONS-regionserver/c7c455b68129:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for b808239c370e78b82859be5e5b36fcc7 in 22ms, sequenceid=18, compaction requested=false 2024-12-07T13:27:02,591 DEBUG [RS_FLUSH_OPERATIONS-regionserver/c7c455b68129:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(2603): Flush status journal for b808239c370e78b82859be5e5b36fcc7: 2024-12-07T13:27:02,591 DEBUG [RS_FLUSH_OPERATIONS-regionserver/c7c455b68129:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733577982051.b808239c370e78b82859be5e5b36fcc7. 2024-12-07T13:27:02,591 DEBUG [RS_FLUSH_OPERATIONS-regionserver/c7c455b68129:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=14 2024-12-07T13:27:02,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33653 {}] master.HMaster(4169): Remote procedure done, pid=14 2024-12-07T13:27:02,595 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=14, resume processing ppid=13 2024-12-07T13:27:02,595 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=14, ppid=13, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 176 msec 2024-12-07T13:27:02,597 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=13, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 183 msec 2024-12-07T13:27:03,590 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,42863,1733577894737/c7c455b68129%2C42863%2C1733577894737.1733577894972 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:27:03,590 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,43699,1733577893481/c7c455b68129%2C43699%2C1733577893481.meta.1733577894498.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-07T13:27:04,591 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,42863,1733577894737/c7c455b68129%2C42863%2C1733577894737.1733577894972 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:27:04,591 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,43699,1733577893481/c7c455b68129%2C43699%2C1733577893481.meta.1733577894498.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:27:05,593 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,43699,1733577893481/c7c455b68129%2C43699%2C1733577893481.meta.1733577894498.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-07T13:27:05,593 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,42863,1733577894737/c7c455b68129%2C42863%2C1733577894737.1733577894972 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:27:06,594 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,43699,1733577893481/c7c455b68129%2C43699%2C1733577893481.meta.1733577894498.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:27:06,594 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,42863,1733577894737/c7c455b68129%2C42863%2C1733577894737.1733577894972 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-07T13:27:07,392 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region b808239c370e78b82859be5e5b36fcc7, had cached 0 bytes from a total of 14329 2024-12-07T13:27:07,596 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,42863,1733577894737/c7c455b68129%2C42863%2C1733577894737.1733577894972 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:27:07,596 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,43699,1733577893481/c7c455b68129%2C43699%2C1733577893481.meta.1733577894498.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:27:08,597 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,43699,1733577893481/c7c455b68129%2C43699%2C1733577893481.meta.1733577894498.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:27:08,597 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,42863,1733577894737/c7c455b68129%2C42863%2C1733577894737.1733577894972 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:27:09,598 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,42863,1733577894737/c7c455b68129%2C42863%2C1733577894737.1733577894972 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:27:09,598 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,43699,1733577893481/c7c455b68129%2C43699%2C1733577893481.meta.1733577894498.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:27:10,599 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,43699,1733577893481/c7c455b68129%2C43699%2C1733577893481.meta.1733577894498.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:27:10,599 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,42863,1733577894737/c7c455b68129%2C42863%2C1733577894737.1733577894972 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:27:11,600 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,43699,1733577893481/c7c455b68129%2C43699%2C1733577893481.meta.1733577894498.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:27:11,600 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,42863,1733577894737/c7c455b68129%2C42863%2C1733577894737.1733577894972 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more
2024-12-07T13:27:12,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33653 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=13
2024-12-07T13:27:12,466 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed
2024-12-07T13:27:12,469 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor c7c455b68129%2C34499%2C1733577980811.1733578032469
2024-12-07T13:27:12,476 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-07T13:27:12,476 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-07T13:27:12,476 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-07T13:27:12,477 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-07T13:27:12,477 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-07T13:27:12,477 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/6ae24e44-b208-ee4e-212c-fa697bebf49a/WALs/c7c455b68129,34499,1733577980811/c7c455b68129%2C34499%2C1733577980811.1733578022399 with entries=3, filesize=1.97 KB; new WAL /user/jenkins/test-data/6ae24e44-b208-ee4e-212c-fa697bebf49a/WALs/c7c455b68129,34499,1733577980811/c7c455b68129%2C34499%2C1733577980811.1733578032469
2024-12-07T13:27:12,478 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45911:45911),(127.0.0.1/127.0.0.1:39617:39617)]
2024-12-07T13:27:12,478 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:35389/user/jenkins/test-data/6ae24e44-b208-ee4e-212c-fa697bebf49a/WALs/c7c455b68129,34499,1733577980811/c7c455b68129%2C34499%2C1733577980811.1733578022399 is not closed yet, will try archiving it next time
2024-12-07T13:27:12,478 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:35389/user/jenkins/test-data/6ae24e44-b208-ee4e-212c-fa697bebf49a/WALs/c7c455b68129,34499,1733577980811/c7c455b68129%2C34499%2C1733577980811.1733578012280 to hdfs://localhost:35389/user/jenkins/test-data/6ae24e44-b208-ee4e-212c-fa697bebf49a/oldWALs/c7c455b68129%2C34499%2C1733577980811.1733578012280
2024-12-07T13:27:12,478 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster
2024-12-07T13:27:12,478 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test.
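The repeated RecoverLeaseFSUtils warnings above come from a retry loop that probes the two old WAL files roughly once per second (the timestamps advance from 13:27:02 to 13:27:11 in one-second steps): HBase asks HDFS to recover the file lease and then checks whether the file is closed, and here every probe fails with "Filesystem closed" because the DFSClient behind those paths has already been shut down. A minimal sketch of that retry shape, using only the public DistributedFileSystem API; the interval and timeout handling below are simplifications for illustration, not HBase's actual configuration.

// Illustrative sketch of the recoverLease/isFileClosed retry pattern seen in the warnings above.
import java.io.IOException;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

final class LeaseRecoverySketch {
  static boolean recover(DistributedFileSystem dfs, Path wal, long timeoutMs)
      throws InterruptedException {
    long deadline = System.currentTimeMillis() + timeoutMs;
    while (System.currentTimeMillis() < deadline) {
      try {
        // Ask the NameNode to release the lease, then check whether the file is closed.
        if (dfs.recoverLease(wal) || dfs.isFileClosed(wal)) {
          return true;              // lease released and file closed: recovery done
        }
      } catch (IOException e) {
        // e.g. "Filesystem closed": logged as a WARN and retried, as in the log above
      }
      Thread.sleep(1000L);          // probe again roughly once per second
    }
    return false;                   // caller gives up once the timeout expires
  }
}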
2024-12-07T13:27:12,478 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-07T13:27:12,478 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T13:27:12,479 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T13:27:12,479 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
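The call stack above shows the JUnit teardown path: AbstractTestLogRolling.tearDown invokes HBaseTestingUtil.shutdownMiniCluster, which closes the shared async connection and then shuts the mini HBase cluster down. A sketch of that lifecycle pattern follows, assuming a JUnit 4 test with an HBaseTestingUtil instance; the class and field names are illustrative, not the actual AbstractTestLogRolling source.

// Illustrative sketch of the mini-cluster lifecycle implied by the teardown stack above.
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.junit.After;
import org.junit.Before;

public class MiniClusterLifecycleSketch {
  private final HBaseTestingUtil testUtil = new HBaseTestingUtil();

  @Before
  public void setUp() throws Exception {
    testUtil.startMiniCluster();       // brings up ZooKeeper, mini DFS, a master and a region server
  }

  @After
  public void tearDown() throws Exception {
    testUtil.shutdownMiniCluster();    // closes the shared connection, then stops the cluster (as logged above)
  }
}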
2024-12-07T13:27:12,479 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster
2024-12-07T13:27:12,479 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34847 is added to blk_1073741842_1018 (size=2026)
2024-12-07T13:27:12,479 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1495462798, stopped=false
2024-12-07T13:27:12,479 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=c7c455b68129,33653,1733577980629
2024-12-07T13:27:12,480 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33687 is added to blk_1073741842_1018 (size=2026)
2024-12-07T13:27:12,539 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34499-0x1000074c18e0001, quorum=127.0.0.1:60915, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running
2024-12-07T13:27:12,539 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33653-0x1000074c18e0000, quorum=127.0.0.1:60915, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running
2024-12-07T13:27:12,539 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34499-0x1000074c18e0001, quorum=127.0.0.1:60915, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-07T13:27:12,539 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33653-0x1000074c18e0000, quorum=127.0.0.1:60915, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-07T13:27:12,539 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping
2024-12-07T13:27:12,540 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test.
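The ZKWatcher events above record the shutdown signal itself: the deletion of the /hbase/running znode is what tells the master and region server watchers that a cluster shutdown has been requested. A standalone sketch of watching that znode with the plain ZooKeeper client follows; the quorum address is taken from the log, the session timeout and wait are arbitrary illustrative values, and HBase's own ZKWatcher wrapper is not used here.

// Illustrative sketch: observing the NodeDeleted event on /hbase/running reported above.
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

public class RunningZNodeWatchSketch {
  public static void main(String[] args) throws Exception {
    Watcher watcher = new Watcher() {
      @Override
      public void process(WatchedEvent event) {
        // The event the ZKWatcher log lines report: NodeDeleted on /hbase/running.
        if (event.getType() == Event.EventType.NodeDeleted
            && "/hbase/running".equals(event.getPath())) {
          System.out.println("Cluster shutdown requested: " + event.getPath() + " was deleted");
        }
      }
    };
    // Quorum address as logged above; 30s session timeout is an illustrative choice.
    ZooKeeper zk = new ZooKeeper("127.0.0.1:60915", 30000, watcher);
    zk.exists("/hbase/running", true); // 'true' registers the default (constructor) watcher on the znode
    Thread.sleep(60000);               // keep the session open long enough to receive the event
    zk.close();
  }
}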
2024-12-07T13:27:12,540 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-07T13:27:12,540 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T13:27:12,540 DEBUG [zk-event-processor-pool-0 {}] 
zookeeper.ZKUtil(113): master:33653-0x1000074c18e0000, quorum=127.0.0.1:60915, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-07T13:27:12,541 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:34499-0x1000074c18e0001, quorum=127.0.0.1:60915, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-07T13:27:12,541 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'c7c455b68129,34499,1733577980811' ***** 2024-12-07T13:27:12,541 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-07T13:27:12,541 INFO [RS:0;c7c455b68129:34499 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-07T13:27:12,542 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-07T13:27:12,542 INFO [RS:0;c7c455b68129:34499 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-07T13:27:12,542 INFO [RS:0;c7c455b68129:34499 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-07T13:27:12,542 INFO [RS:0;c7c455b68129:34499 {}] regionserver.HRegionServer(3091): Received CLOSE for b808239c370e78b82859be5e5b36fcc7 2024-12-07T13:27:12,543 INFO [RS:0;c7c455b68129:34499 {}] regionserver.HRegionServer(959): stopping server c7c455b68129,34499,1733577980811 2024-12-07T13:27:12,543 INFO [RS:0;c7c455b68129:34499 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-07T13:27:12,543 INFO [RS:0;c7c455b68129:34499 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;c7c455b68129:34499. 2024-12-07T13:27:12,543 DEBUG [RS:0;c7c455b68129:34499 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-07T13:27:12,543 DEBUG [RS_CLOSE_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing b808239c370e78b82859be5e5b36fcc7, disabling compactions & flushes 2024-12-07T13:27:12,543 DEBUG [RS:0;c7c455b68129:34499 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T13:27:12,543 INFO [RS_CLOSE_REGION-regionserver/c7c455b68129:0-0 
{event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733577982051.b808239c370e78b82859be5e5b36fcc7. 2024-12-07T13:27:12,543 INFO [RS:0;c7c455b68129:34499 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-07T13:27:12,544 DEBUG [RS_CLOSE_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733577982051.b808239c370e78b82859be5e5b36fcc7. 2024-12-07T13:27:12,544 INFO [RS:0;c7c455b68129:34499 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-07T13:27:12,544 INFO [RS:0;c7c455b68129:34499 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-07T13:27:12,544 DEBUG [RS_CLOSE_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733577982051.b808239c370e78b82859be5e5b36fcc7. after waiting 0 ms 2024-12-07T13:27:12,544 INFO [RS:0;c7c455b68129:34499 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-07T13:27:12,544 DEBUG [RS_CLOSE_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733577982051.b808239c370e78b82859be5e5b36fcc7. 2024-12-07T13:27:12,544 INFO [RS_CLOSE_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing b808239c370e78b82859be5e5b36fcc7 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-12-07T13:27:12,544 INFO [RS:0;c7c455b68129:34499 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-12-07T13:27:12,544 DEBUG [RS:0;c7c455b68129:34499 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, b808239c370e78b82859be5e5b36fcc7=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733577982051.b808239c370e78b82859be5e5b36fcc7.} 2024-12-07T13:27:12,544 DEBUG [RS:0;c7c455b68129:34499 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, b808239c370e78b82859be5e5b36fcc7 2024-12-07T13:27:12,545 DEBUG [RS_CLOSE_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-07T13:27:12,545 INFO [RS_CLOSE_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-07T13:27:12,545 DEBUG [RS_CLOSE_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-07T13:27:12,545 DEBUG [RS_CLOSE_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-07T13:27:12,545 DEBUG [RS_CLOSE_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-07T13:27:12,545 INFO [RS_CLOSE_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.89 KB heapSize=3.91 KB 2024-12-07T13:27:12,551 DEBUG [RS_CLOSE_REGION-regionserver/c7c455b68129:0-0 
{event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35389/user/jenkins/test-data/6ae24e44-b208-ee4e-212c-fa697bebf49a/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/b808239c370e78b82859be5e5b36fcc7/.tmp/info/0b89e427c0ef40e097506ed50e701ba2 is 1080, key is row0001/info:/1733578032467/Put/seqid=0 2024-12-07T13:27:12,555 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34847 is added to blk_1073741845_1021 (size=6033) 2024-12-07T13:27:12,555 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33687 is added to blk_1073741845_1021 (size=6033) 2024-12-07T13:27:12,558 INFO [RS_CLOSE_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=22 (bloomFilter=true), to=hdfs://localhost:35389/user/jenkins/test-data/6ae24e44-b208-ee4e-212c-fa697bebf49a/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/b808239c370e78b82859be5e5b36fcc7/.tmp/info/0b89e427c0ef40e097506ed50e701ba2 2024-12-07T13:27:12,564 DEBUG [RS_CLOSE_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35389/user/jenkins/test-data/6ae24e44-b208-ee4e-212c-fa697bebf49a/data/hbase/meta/1588230740/.tmp/info/c4847fe53a604405b8fd321b36d3f088 is 227, key is TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733577982051.b808239c370e78b82859be5e5b36fcc7./info:regioninfo/1733577982409/Put/seqid=0 2024-12-07T13:27:12,564 DEBUG [RS_CLOSE_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35389/user/jenkins/test-data/6ae24e44-b208-ee4e-212c-fa697bebf49a/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/b808239c370e78b82859be5e5b36fcc7/.tmp/info/0b89e427c0ef40e097506ed50e701ba2 as hdfs://localhost:35389/user/jenkins/test-data/6ae24e44-b208-ee4e-212c-fa697bebf49a/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/b808239c370e78b82859be5e5b36fcc7/info/0b89e427c0ef40e097506ed50e701ba2 2024-12-07T13:27:12,568 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34847 is added to blk_1073741846_1022 (size=7308) 2024-12-07T13:27:12,568 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33687 is added to blk_1073741846_1022 (size=7308) 2024-12-07T13:27:12,569 INFO [RS_CLOSE_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.65 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:35389/user/jenkins/test-data/6ae24e44-b208-ee4e-212c-fa697bebf49a/data/hbase/meta/1588230740/.tmp/info/c4847fe53a604405b8fd321b36d3f088 2024-12-07T13:27:12,570 INFO [RS_CLOSE_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35389/user/jenkins/test-data/6ae24e44-b208-ee4e-212c-fa697bebf49a/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/b808239c370e78b82859be5e5b36fcc7/info/0b89e427c0ef40e097506ed50e701ba2, entries=1, sequenceid=22, filesize=5.9 K 2024-12-07T13:27:12,571 INFO [RS_CLOSE_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize 
~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for b808239c370e78b82859be5e5b36fcc7 in 27ms, sequenceid=22, compaction requested=true 2024-12-07T13:27:12,572 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733577982051.b808239c370e78b82859be5e5b36fcc7.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:35389/user/jenkins/test-data/6ae24e44-b208-ee4e-212c-fa697bebf49a/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/b808239c370e78b82859be5e5b36fcc7/info/00030acab59f402eb2b602b62fa1af4b, hdfs://localhost:35389/user/jenkins/test-data/6ae24e44-b208-ee4e-212c-fa697bebf49a/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/b808239c370e78b82859be5e5b36fcc7/info/78504fb9f7a14b34964eff0cf37d0c45, hdfs://localhost:35389/user/jenkins/test-data/6ae24e44-b208-ee4e-212c-fa697bebf49a/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/b808239c370e78b82859be5e5b36fcc7/info/a09487a68514449b8143e8c6d5da15d3] to archive 2024-12-07T13:27:12,572 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733577982051.b808239c370e78b82859be5e5b36fcc7.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-12-07T13:27:12,574 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733577982051.b808239c370e78b82859be5e5b36fcc7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35389/user/jenkins/test-data/6ae24e44-b208-ee4e-212c-fa697bebf49a/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/b808239c370e78b82859be5e5b36fcc7/info/00030acab59f402eb2b602b62fa1af4b to hdfs://localhost:35389/user/jenkins/test-data/6ae24e44-b208-ee4e-212c-fa697bebf49a/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/b808239c370e78b82859be5e5b36fcc7/info/00030acab59f402eb2b602b62fa1af4b 2024-12-07T13:27:12,575 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733577982051.b808239c370e78b82859be5e5b36fcc7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35389/user/jenkins/test-data/6ae24e44-b208-ee4e-212c-fa697bebf49a/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/b808239c370e78b82859be5e5b36fcc7/info/78504fb9f7a14b34964eff0cf37d0c45 to hdfs://localhost:35389/user/jenkins/test-data/6ae24e44-b208-ee4e-212c-fa697bebf49a/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/b808239c370e78b82859be5e5b36fcc7/info/78504fb9f7a14b34964eff0cf37d0c45 2024-12-07T13:27:12,577 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733577982051.b808239c370e78b82859be5e5b36fcc7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35389/user/jenkins/test-data/6ae24e44-b208-ee4e-212c-fa697bebf49a/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/b808239c370e78b82859be5e5b36fcc7/info/a09487a68514449b8143e8c6d5da15d3 to hdfs://localhost:35389/user/jenkins/test-data/6ae24e44-b208-ee4e-212c-fa697bebf49a/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/b808239c370e78b82859be5e5b36fcc7/info/a09487a68514449b8143e8c6d5da15d3 2024-12-07T13:27:12,577 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733577982051.b808239c370e78b82859be5e5b36fcc7.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. 
org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=c7c455b68129:33653 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 16 more 2024-12-07T13:27:12,577 WARN [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733577982051.b808239c370e78b82859be5e5b36fcc7.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [00030acab59f402eb2b602b62fa1af4b=6033, 78504fb9f7a14b34964eff0cf37d0c45=6033, a09487a68514449b8143e8c6d5da15d3=6033] 2024-12-07T13:27:12,581 DEBUG [RS_CLOSE_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35389/user/jenkins/test-data/6ae24e44-b208-ee4e-212c-fa697bebf49a/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/b808239c370e78b82859be5e5b36fcc7/recovered.edits/25.seqid, newMaxSeqId=25, maxSeqId=1 2024-12-07T13:27:12,581 INFO [RS_CLOSE_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733577982051.b808239c370e78b82859be5e5b36fcc7. 2024-12-07T13:27:12,582 DEBUG [RS_CLOSE_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for b808239c370e78b82859be5e5b36fcc7: Waiting for close lock at 1733578032543Running coprocessor pre-close hooks at 1733578032543Disabling compacts and flushes for region at 1733578032543Disabling writes for close at 1733578032544 (+1 ms)Obtaining lock to block concurrent updates at 1733578032544Preparing flush snapshotting stores in b808239c370e78b82859be5e5b36fcc7 at 1733578032544Finished memstore snapshotting TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733577982051.b808239c370e78b82859be5e5b36fcc7., syncing WAL and waiting on mvcc, flushsize=dataSize=1076, getHeapSize=1392, getOffHeapSize=0, getCellsCount=1 at 1733578032545 (+1 ms)Flushing stores of TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733577982051.b808239c370e78b82859be5e5b36fcc7. at 1733578032547 (+2 ms)Flushing b808239c370e78b82859be5e5b36fcc7/info: creating writer at 1733578032547Flushing b808239c370e78b82859be5e5b36fcc7/info: appending metadata at 1733578032550 (+3 ms)Flushing b808239c370e78b82859be5e5b36fcc7/info: closing flushed file at 1733578032550Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2a4442ef: reopening flushed file at 1733578032563 (+13 ms)Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for b808239c370e78b82859be5e5b36fcc7 in 27ms, sequenceid=22, compaction requested=true at 1733578032571 (+8 ms)Writing region close event to WAL at 1733578032578 (+7 ms)Running coprocessor post-close hooks at 1733578032581 (+3 ms)Closed at 1733578032581 2024-12-07T13:27:12,582 DEBUG [RS_CLOSE_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733577982051.b808239c370e78b82859be5e5b36fcc7. 
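The StoppedRpcClientException above is expected at this point in shutdown: the region server has already stopped its RPC client, so the quota report for the compacted files it just archived cannot reach the master, and the failure is downgraded to the WARN that follows. A hypothetical stand-alone illustration of that best-effort pattern (none of these class or method names are HBase code):

import java.util.List;
import java.util.concurrent.atomic.AtomicBoolean;

public class BestEffortArchivalReportSketch {
  private final AtomicBoolean rpcClientStopped = new AtomicBoolean(false);

  // Stand-in for the RPC client being stopped as part of cluster shutdown.
  void stopRpcClient() {
    rpcClientStopped.set(true);
  }

  // Stand-in for the RPC that reports archived files to the master for quota bookkeeping.
  void reportFileArchival(List<String> files) {
    if (rpcClientStopped.get()) {
      throw new IllegalStateException("rpc client already stopped");   // plays the role of StoppedRpcClientException
    }
    // a real implementation would issue the report RPC here
  }

  void removeCompactedFiles(List<String> archivedFiles) {
    try {
      reportFileArchival(archivedFiles);
    } catch (RuntimeException e) {
      // Best effort only: the files were already moved to the archive directory on HDFS,
      // so a failed bookkeeping report during shutdown is merely warned about.
      System.err.println("Failed to report archival of files: " + archivedFiles + " (" + e.getMessage() + ")");
    }
  }

  public static void main(String[] args) {
    BestEffortArchivalReportSketch sketch = new BestEffortArchivalReportSketch();
    sketch.stopRpcClient();                                     // shutdown has begun
    sketch.removeCompactedFiles(List.of("00030acab59f402eb2b602b62fa1af4b"));
  }
}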
2024-12-07T13:27:12,588 DEBUG [RS_CLOSE_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35389/user/jenkins/test-data/6ae24e44-b208-ee4e-212c-fa697bebf49a/data/hbase/meta/1588230740/.tmp/ns/676e9b6ca1904021b144d42c3f2a70d4 is 43, key is default/ns:d/1733577981939/Put/seqid=0 2024-12-07T13:27:12,592 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33687 is added to blk_1073741847_1023 (size=5153) 2024-12-07T13:27:12,592 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34847 is added to blk_1073741847_1023 (size=5153) 2024-12-07T13:27:12,593 INFO [RS_CLOSE_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:35389/user/jenkins/test-data/6ae24e44-b208-ee4e-212c-fa697bebf49a/data/hbase/meta/1588230740/.tmp/ns/676e9b6ca1904021b144d42c3f2a70d4 2024-12-07T13:27:12,601 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,43699,1733577893481/c7c455b68129%2C43699%2C1733577893481.meta.1733577894498.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-07T13:27:12,601 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,42863,1733577894737/c7c455b68129%2C42863%2C1733577894737.1733577894972 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-07T13:27:12,610 DEBUG [RS_CLOSE_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35389/user/jenkins/test-data/6ae24e44-b208-ee4e-212c-fa697bebf49a/data/hbase/meta/1588230740/.tmp/table/c251cec7a5a24bfe9abe391871c14c8e is 89, key is TestLogRolling-testCompactionRecordDoesntBlockRolling/table:state/1733577982419/Put/seqid=0 2024-12-07T13:27:12,614 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33687 is added to blk_1073741848_1024 (size=5508) 2024-12-07T13:27:12,614 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34847 is added to blk_1073741848_1024 (size=5508) 2024-12-07T13:27:12,615 INFO [RS_CLOSE_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=170 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:35389/user/jenkins/test-data/6ae24e44-b208-ee4e-212c-fa697bebf49a/data/hbase/meta/1588230740/.tmp/table/c251cec7a5a24bfe9abe391871c14c8e 2024-12-07T13:27:12,619 DEBUG [RS_CLOSE_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35389/user/jenkins/test-data/6ae24e44-b208-ee4e-212c-fa697bebf49a/data/hbase/meta/1588230740/.tmp/info/c4847fe53a604405b8fd321b36d3f088 as hdfs://localhost:35389/user/jenkins/test-data/6ae24e44-b208-ee4e-212c-fa697bebf49a/data/hbase/meta/1588230740/info/c4847fe53a604405b8fd321b36d3f088 2024-12-07T13:27:12,625 INFO [RS_CLOSE_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35389/user/jenkins/test-data/6ae24e44-b208-ee4e-212c-fa697bebf49a/data/hbase/meta/1588230740/info/c4847fe53a604405b8fd321b36d3f088, entries=10, sequenceid=11, filesize=7.1 K 2024-12-07T13:27:12,626 DEBUG [RS_CLOSE_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35389/user/jenkins/test-data/6ae24e44-b208-ee4e-212c-fa697bebf49a/data/hbase/meta/1588230740/.tmp/ns/676e9b6ca1904021b144d42c3f2a70d4 as hdfs://localhost:35389/user/jenkins/test-data/6ae24e44-b208-ee4e-212c-fa697bebf49a/data/hbase/meta/1588230740/ns/676e9b6ca1904021b144d42c3f2a70d4 2024-12-07T13:27:12,632 INFO [RS_CLOSE_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35389/user/jenkins/test-data/6ae24e44-b208-ee4e-212c-fa697bebf49a/data/hbase/meta/1588230740/ns/676e9b6ca1904021b144d42c3f2a70d4, entries=2, sequenceid=11, filesize=5.0 K 2024-12-07T13:27:12,633 DEBUG [RS_CLOSE_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35389/user/jenkins/test-data/6ae24e44-b208-ee4e-212c-fa697bebf49a/data/hbase/meta/1588230740/.tmp/table/c251cec7a5a24bfe9abe391871c14c8e as hdfs://localhost:35389/user/jenkins/test-data/6ae24e44-b208-ee4e-212c-fa697bebf49a/data/hbase/meta/1588230740/table/c251cec7a5a24bfe9abe391871c14c8e 2024-12-07T13:27:12,638 INFO [RS_CLOSE_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35389/user/jenkins/test-data/6ae24e44-b208-ee4e-212c-fa697bebf49a/data/hbase/meta/1588230740/table/c251cec7a5a24bfe9abe391871c14c8e, entries=2, sequenceid=11, filesize=5.4 K 
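The two "Failed invocation ... Filesystem closed" warnings a few entries above come from leftover Close-WAL-Writer tasks still pointing at WALs on port 35785, apparently from an earlier mini cluster in the same JVM, whose FileSystem handle has already been closed; any call on a closed handle fails the same way before any RPC is made. A minimal sketch of that failure mode (assumes hadoop-hdfs-client on the classpath; URI and path are invented for illustration):

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ClosedFileSystemSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // newInstance() bypasses the shared FileSystem cache so only this handle gets closed.
    FileSystem fs = FileSystem.newInstance(URI.create("hdfs://localhost:35785"), conf);
    fs.close();
    // The HDFS client rejects the call locally before attempting any RPC,
    // which is the same "java.io.IOException: Filesystem closed" seen above.
    fs.getFileStatus(new Path("/user/jenkins/test-data/some-wal"));
  }
}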
2024-12-07T13:27:12,639 INFO [RS_CLOSE_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.89 KB/1932, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 1588230740 in 94ms, sequenceid=11, compaction requested=false 2024-12-07T13:27:12,642 DEBUG [RS_CLOSE_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35389/user/jenkins/test-data/6ae24e44-b208-ee4e-212c-fa697bebf49a/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-12-07T13:27:12,643 DEBUG [RS_CLOSE_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-07T13:27:12,643 INFO [RS_CLOSE_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-07T13:27:12,643 DEBUG [RS_CLOSE_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733578032544Running coprocessor pre-close hooks at 1733578032544Disabling compacts and flushes for region at 1733578032544Disabling writes for close at 1733578032545 (+1 ms)Obtaining lock to block concurrent updates at 1733578032545Preparing flush snapshotting stores in 1588230740 at 1733578032545Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1932, getHeapSize=3936, getOffHeapSize=0, getCellsCount=14 at 1733578032546 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1733578032547 (+1 ms)Flushing 1588230740/info: creating writer at 1733578032547Flushing 1588230740/info: appending metadata at 1733578032563 (+16 ms)Flushing 1588230740/info: closing flushed file at 1733578032563Flushing 1588230740/ns: creating writer at 1733578032574 (+11 ms)Flushing 1588230740/ns: appending metadata at 1733578032587 (+13 ms)Flushing 1588230740/ns: closing flushed file at 1733578032587Flushing 1588230740/table: creating writer at 1733578032598 (+11 ms)Flushing 1588230740/table: appending metadata at 1733578032610 (+12 ms)Flushing 1588230740/table: closing flushed file at 1733578032610Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2f45eb6e: reopening flushed file at 1733578032619 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1b6656e: reopening flushed file at 1733578032625 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@311972f0: reopening flushed file at 1733578032632 (+7 ms)Finished flush of dataSize ~1.89 KB/1932, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 1588230740 in 94ms, sequenceid=11, compaction requested=false at 1733578032639 (+7 ms)Writing region close event to WAL at 1733578032640 (+1 ms)Running coprocessor post-close hooks at 1733578032643 (+3 ms)Closed at 1733578032643 2024-12-07T13:27:12,643 DEBUG [RS_CLOSE_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-07T13:27:12,745 INFO [RS:0;c7c455b68129:34499 {}] regionserver.HRegionServer(976): stopping server c7c455b68129,34499,1733577980811; all regions closed. 
2024-12-07T13:27:12,746 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:27:12,746 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:27:12,746 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:27:12,747 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:27:12,747 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:27:12,752 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34847 is added to blk_1073741834_1010 (size=3306) 2024-12-07T13:27:12,753 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33687 is added to blk_1073741834_1010 (size=3306) 2024-12-07T13:27:12,756 DEBUG [RS:0;c7c455b68129:34499 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/6ae24e44-b208-ee4e-212c-fa697bebf49a/oldWALs 2024-12-07T13:27:12,756 INFO [RS:0;c7c455b68129:34499 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog c7c455b68129%2C34499%2C1733577980811.meta:.meta(num 1733577981801) 2024-12-07T13:27:12,756 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:27:12,756 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:27:12,756 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:27:12,757 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:27:12,757 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:27:12,758 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34847 is added to blk_1073741844_1020 (size=1252) 2024-12-07T13:27:12,758 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33687 is added to blk_1073741844_1020 (size=1252) 2024-12-07T13:27:12,761 DEBUG [RS:0;c7c455b68129:34499 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/6ae24e44-b208-ee4e-212c-fa697bebf49a/oldWALs 2024-12-07T13:27:12,761 INFO [RS:0;c7c455b68129:34499 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog c7c455b68129%2C34499%2C1733577980811:(num 1733578032469) 2024-12-07T13:27:12,761 DEBUG [RS:0;c7c455b68129:34499 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T13:27:12,761 INFO [RS:0;c7c455b68129:34499 {}] regionserver.LeaseManager(133): Closed leases 2024-12-07T13:27:12,761 INFO [RS:0;c7c455b68129:34499 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-07T13:27:12,761 INFO [RS:0;c7c455b68129:34499 {}] hbase.ChoreService(370): Chore service for: regionserver/c7c455b68129:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-07T13:27:12,762 INFO [RS:0;c7c455b68129:34499 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-07T13:27:12,762 INFO [regionserver/c7c455b68129:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-07T13:27:12,762 INFO [RS:0;c7c455b68129:34499 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:34499 2024-12-07T13:27:12,831 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34499-0x1000074c18e0001, quorum=127.0.0.1:60915, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/c7c455b68129,34499,1733577980811 2024-12-07T13:27:12,831 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33653-0x1000074c18e0000, quorum=127.0.0.1:60915, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-07T13:27:12,831 INFO [RS:0;c7c455b68129:34499 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-07T13:27:12,843 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [c7c455b68129,34499,1733577980811] 2024-12-07T13:27:12,854 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/c7c455b68129,34499,1733577980811 already deleted, retry=false 2024-12-07T13:27:12,854 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; c7c455b68129,34499,1733577980811 expired; onlineServers=0 2024-12-07T13:27:12,854 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'c7c455b68129,33653,1733577980629' ***** 2024-12-07T13:27:12,854 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-07T13:27:12,854 INFO [M:0;c7c455b68129:33653 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-07T13:27:12,854 INFO [M:0;c7c455b68129:33653 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-07T13:27:12,854 DEBUG [M:0;c7c455b68129:33653 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-07T13:27:12,854 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
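The NodeDeleted event on /hbase/rs/c7c455b68129,34499,1733577980811 and the "RegionServer ephemeral node deleted, processing expiration" line above show the master noticing the region server's ephemeral znode vanish once its ZooKeeper session closes. A toy sketch of that mechanism with the plain ZooKeeper client (quorum address and znode path invented; needs a running ZooKeeper, and connection-ready/error handling is omitted for brevity):

import org.apache.zookeeper.CreateMode;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooDefs;
import org.apache.zookeeper.ZooKeeper;

public class EphemeralExpirationSketch {
  public static void main(String[] args) throws Exception {
    String quorum = "127.0.0.1:2181";   // invented; the log uses its own test quorum

    // Session A plays the region server: it owns an ephemeral znode.
    ZooKeeper owner = new ZooKeeper(quorum, 30_000, event -> { });
    owner.create("/demo-rs-node", new byte[0], ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);

    // Session B plays the master-side tracker: it sets a watch on that znode.
    ZooKeeper tracker = new ZooKeeper(quorum, 30_000, event -> { });
    tracker.exists("/demo-rs-node", event -> {
      if (event.getType() == Watcher.Event.EventType.NodeDeleted) {
        System.out.println("ephemeral node deleted, processing expiration of the owner");
      }
    });

    // Closing the owning session removes the ephemeral znode, firing NodeDeleted on the watcher.
    owner.close();
    Thread.sleep(1_000);   // toy wait for the watch callback; real code reacts inside the watcher
    tracker.close();
  }
}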
2024-12-07T13:27:12,854 DEBUG [M:0;c7c455b68129:33653 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-07T13:27:12,854 DEBUG [master/c7c455b68129:0:becomeActiveMaster-HFileCleaner.large.0-1733577981170 {}] cleaner.HFileCleaner(306): Exit Thread[master/c7c455b68129:0:becomeActiveMaster-HFileCleaner.large.0-1733577981170,5,FailOnTimeoutGroup] 2024-12-07T13:27:12,854 DEBUG [master/c7c455b68129:0:becomeActiveMaster-HFileCleaner.small.0-1733577981171 {}] cleaner.HFileCleaner(306): Exit Thread[master/c7c455b68129:0:becomeActiveMaster-HFileCleaner.small.0-1733577981171,5,FailOnTimeoutGroup] 2024-12-07T13:27:12,854 INFO [M:0;c7c455b68129:33653 {}] hbase.ChoreService(370): Chore service for: master/c7c455b68129:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-12-07T13:27:12,854 INFO [M:0;c7c455b68129:33653 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-07T13:27:12,855 DEBUG [M:0;c7c455b68129:33653 {}] master.HMaster(1795): Stopping service threads 2024-12-07T13:27:12,855 INFO [M:0;c7c455b68129:33653 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-07T13:27:12,855 INFO [M:0;c7c455b68129:33653 {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-07T13:27:12,855 INFO [M:0;c7c455b68129:33653 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-07T13:27:12,855 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-07T13:27:12,864 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33653-0x1000074c18e0000, quorum=127.0.0.1:60915, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-07T13:27:12,864 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33653-0x1000074c18e0000, quorum=127.0.0.1:60915, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T13:27:12,864 DEBUG [M:0;c7c455b68129:33653 {}] zookeeper.ZKUtil(347): master:33653-0x1000074c18e0000, quorum=127.0.0.1:60915, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-07T13:27:12,864 WARN [M:0;c7c455b68129:33653 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-07T13:27:12,865 INFO [M:0;c7c455b68129:33653 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:35389/user/jenkins/test-data/6ae24e44-b208-ee4e-212c-fa697bebf49a/.lastflushedseqids 2024-12-07T13:27:12,871 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34847 is added to blk_1073741849_1025 (size=130) 2024-12-07T13:27:12,871 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33687 is added to blk_1073741849_1025 (size=130) 2024-12-07T13:27:12,872 INFO [M:0;c7c455b68129:33653 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-12-07T13:27:12,872 INFO [M:0;c7c455b68129:33653 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-07T13:27:12,872 DEBUG [M:0;c7c455b68129:33653 {}] regionserver.HRegion(1722): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-07T13:27:12,872 INFO [M:0;c7c455b68129:33653 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-07T13:27:12,872 DEBUG [M:0;c7c455b68129:33653 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-07T13:27:12,872 DEBUG [M:0;c7c455b68129:33653 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-07T13:27:12,872 DEBUG [M:0;c7c455b68129:33653 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-07T13:27:12,872 INFO [M:0;c7c455b68129:33653 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=43.55 KB heapSize=54.94 KB 2024-12-07T13:27:12,891 DEBUG [M:0;c7c455b68129:33653 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35389/user/jenkins/test-data/6ae24e44-b208-ee4e-212c-fa697bebf49a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/2079e21fa62c4c5b9ec1ff808978f974 is 82, key is hbase:meta,,1/info:regioninfo/1733577981828/Put/seqid=0 2024-12-07T13:27:12,895 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33687 is added to blk_1073741850_1026 (size=5672) 2024-12-07T13:27:12,895 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34847 is added to blk_1073741850_1026 (size=5672) 2024-12-07T13:27:12,895 INFO [M:0;c7c455b68129:33653 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:35389/user/jenkins/test-data/6ae24e44-b208-ee4e-212c-fa697bebf49a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/2079e21fa62c4c5b9ec1ff808978f974 2024-12-07T13:27:12,914 DEBUG [M:0;c7c455b68129:33653 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35389/user/jenkins/test-data/6ae24e44-b208-ee4e-212c-fa697bebf49a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/c3ccd28df61a43408db874351ef38577 is 797, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1733577982424/Put/seqid=0 2024-12-07T13:27:12,918 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33687 is added to blk_1073741851_1027 (size=7819) 2024-12-07T13:27:12,918 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34847 is added to blk_1073741851_1027 (size=7819) 2024-12-07T13:27:12,919 INFO [M:0;c7c455b68129:33653 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.95 KB at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:35389/user/jenkins/test-data/6ae24e44-b208-ee4e-212c-fa697bebf49a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/c3ccd28df61a43408db874351ef38577 2024-12-07T13:27:12,923 INFO [M:0;c7c455b68129:33653 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for c3ccd28df61a43408db874351ef38577 2024-12-07T13:27:12,935 DEBUG [M:0;c7c455b68129:33653 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:35389/user/jenkins/test-data/6ae24e44-b208-ee4e-212c-fa697bebf49a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/cb224ef32b204720a4b9ac3a4028e8ce is 69, key is c7c455b68129,34499,1733577980811/rs:state/1733577981268/Put/seqid=0 2024-12-07T13:27:12,939 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33687 is added to blk_1073741852_1028 (size=5156) 2024-12-07T13:27:12,940 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34847 is added to blk_1073741852_1028 (size=5156) 2024-12-07T13:27:12,940 INFO [M:0;c7c455b68129:33653 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:35389/user/jenkins/test-data/6ae24e44-b208-ee4e-212c-fa697bebf49a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/cb224ef32b204720a4b9ac3a4028e8ce 2024-12-07T13:27:12,944 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34499-0x1000074c18e0001, quorum=127.0.0.1:60915, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-07T13:27:12,944 INFO [RS:0;c7c455b68129:34499 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-07T13:27:12,944 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34499-0x1000074c18e0001, quorum=127.0.0.1:60915, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-07T13:27:12,944 INFO [RS:0;c7c455b68129:34499 {}] regionserver.HRegionServer(1031): Exiting; stopping=c7c455b68129,34499,1733577980811; zookeeper connection closed. 2024-12-07T13:27:12,944 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@741a85cb {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@741a85cb 2024-12-07T13:27:12,944 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-12-07T13:27:12,963 DEBUG [M:0;c7c455b68129:33653 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35389/user/jenkins/test-data/6ae24e44-b208-ee4e-212c-fa697bebf49a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/a945a4dac7d54e7aa8cb123887ba15ad is 52, key is load_balancer_on/state:d/1733577982047/Put/seqid=0 2024-12-07T13:27:12,967 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33687 is added to blk_1073741853_1029 (size=5056) 2024-12-07T13:27:12,968 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34847 is added to blk_1073741853_1029 (size=5056) 2024-12-07T13:27:12,968 INFO [M:0;c7c455b68129:33653 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:35389/user/jenkins/test-data/6ae24e44-b208-ee4e-212c-fa697bebf49a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/a945a4dac7d54e7aa8cb123887ba15ad 2024-12-07T13:27:12,973 DEBUG [M:0;c7c455b68129:33653 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35389/user/jenkins/test-data/6ae24e44-b208-ee4e-212c-fa697bebf49a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/2079e21fa62c4c5b9ec1ff808978f974 as 
hdfs://localhost:35389/user/jenkins/test-data/6ae24e44-b208-ee4e-212c-fa697bebf49a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/2079e21fa62c4c5b9ec1ff808978f974 2024-12-07T13:27:12,978 INFO [M:0;c7c455b68129:33653 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35389/user/jenkins/test-data/6ae24e44-b208-ee4e-212c-fa697bebf49a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/2079e21fa62c4c5b9ec1ff808978f974, entries=8, sequenceid=121, filesize=5.5 K 2024-12-07T13:27:12,979 DEBUG [M:0;c7c455b68129:33653 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35389/user/jenkins/test-data/6ae24e44-b208-ee4e-212c-fa697bebf49a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/c3ccd28df61a43408db874351ef38577 as hdfs://localhost:35389/user/jenkins/test-data/6ae24e44-b208-ee4e-212c-fa697bebf49a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/c3ccd28df61a43408db874351ef38577 2024-12-07T13:27:12,984 INFO [M:0;c7c455b68129:33653 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for c3ccd28df61a43408db874351ef38577 2024-12-07T13:27:12,985 INFO [M:0;c7c455b68129:33653 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35389/user/jenkins/test-data/6ae24e44-b208-ee4e-212c-fa697bebf49a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/c3ccd28df61a43408db874351ef38577, entries=14, sequenceid=121, filesize=7.6 K 2024-12-07T13:27:12,986 DEBUG [M:0;c7c455b68129:33653 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35389/user/jenkins/test-data/6ae24e44-b208-ee4e-212c-fa697bebf49a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/cb224ef32b204720a4b9ac3a4028e8ce as hdfs://localhost:35389/user/jenkins/test-data/6ae24e44-b208-ee4e-212c-fa697bebf49a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/cb224ef32b204720a4b9ac3a4028e8ce 2024-12-07T13:27:12,990 INFO [M:0;c7c455b68129:33653 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35389/user/jenkins/test-data/6ae24e44-b208-ee4e-212c-fa697bebf49a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/cb224ef32b204720a4b9ac3a4028e8ce, entries=1, sequenceid=121, filesize=5.0 K 2024-12-07T13:27:12,991 DEBUG [M:0;c7c455b68129:33653 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35389/user/jenkins/test-data/6ae24e44-b208-ee4e-212c-fa697bebf49a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/a945a4dac7d54e7aa8cb123887ba15ad as hdfs://localhost:35389/user/jenkins/test-data/6ae24e44-b208-ee4e-212c-fa697bebf49a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/a945a4dac7d54e7aa8cb123887ba15ad 2024-12-07T13:27:12,996 INFO [M:0;c7c455b68129:33653 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35389/user/jenkins/test-data/6ae24e44-b208-ee4e-212c-fa697bebf49a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/a945a4dac7d54e7aa8cb123887ba15ad, entries=1, sequenceid=121, filesize=4.9 K 2024-12-07T13:27:12,997 INFO [M:0;c7c455b68129:33653 {}] regionserver.HRegion(3140): Finished flush of dataSize ~43.55 KB/44599, heapSize ~54.88 KB/56192, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 125ms, sequenceid=121, compaction requested=false 2024-12-07T13:27:12,999 INFO [M:0;c7c455b68129:33653 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
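The repeated "Committing ... .tmp/... as ..." lines, here for the master's local store and earlier for the user region and hbase:meta, all follow the same write-under-.tmp-then-rename commit, so readers never observe a partially written file. A generic sketch of that pattern with plain Hadoop FileSystem calls (paths invented; this is not HRegionFileSystem itself):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class TmpThenCommitSketch {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());   // local FS unless fs.defaultFS points at HDFS
    Path tmp = new Path("/tmp/store/.tmp/flushedfile");
    Path dst = new Path("/tmp/store/cf/flushedfile");

    // 1) Write the whole file under the store's .tmp directory first,
    //    so concurrent readers never see a half-written file.
    fs.mkdirs(tmp.getParent());
    try (FSDataOutputStream out = fs.create(tmp, true)) {
      out.writeBytes("flushed cells would be written here");
    }

    // 2) Commit by renaming into the live column-family directory;
    //    on HDFS a rename within one filesystem is atomic.
    fs.mkdirs(dst.getParent());
    if (!fs.rename(tmp, dst)) {
      throw new java.io.IOException("commit rename failed: " + tmp + " -> " + dst);
    }
  }
}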
2024-12-07T13:27:12,999 DEBUG [M:0;c7c455b68129:33653 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733578032872Disabling compacts and flushes for region at 1733578032872Disabling writes for close at 1733578032872Obtaining lock to block concurrent updates at 1733578032872Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733578032872Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=44599, getHeapSize=56192, getOffHeapSize=0, getCellsCount=140 at 1733578032873 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1733578032873Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733578032873Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733578032890 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733578032890Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733578032899 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733578032913 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733578032913Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733578032923 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733578032935 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733578032935Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1733578032944 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1733578032962 (+18 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1733578032962Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@79eacd66: reopening flushed file at 1733578032973 (+11 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5417321e: reopening flushed file at 1733578032978 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@33312991: reopening flushed file at 1733578032985 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1712c16b: reopening flushed file at 1733578032991 (+6 ms)Finished flush of dataSize ~43.55 KB/44599, heapSize ~54.88 KB/56192, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 125ms, sequenceid=121, compaction requested=false at 1733578032997 (+6 ms)Writing region close event to WAL at 1733578032998 (+1 ms)Closed at 1733578032998 2024-12-07T13:27:12,999 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:27:12,999 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:27:12,999 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:27:12,999 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:27:12,999 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:27:13,001 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34847 is added to blk_1073741830_1006 (size=52996) 2024-12-07T13:27:13,001 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33687 is added to blk_1073741830_1006 (size=52996) 2024-12-07T13:27:13,002 INFO [M:0;c7c455b68129:33653 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 
2024-12-07T13:27:13,002 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-07T13:27:13,002 INFO [M:0;c7c455b68129:33653 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:33653 2024-12-07T13:27:13,002 INFO [M:0;c7c455b68129:33653 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-07T13:27:13,112 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33653-0x1000074c18e0000, quorum=127.0.0.1:60915, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-07T13:27:13,112 INFO [M:0;c7c455b68129:33653 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-07T13:27:13,112 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33653-0x1000074c18e0000, quorum=127.0.0.1:60915, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-07T13:27:13,117 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1c429e05{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-07T13:27:13,118 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1f411ba{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-07T13:27:13,118 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-07T13:27:13,118 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@280e4ad5{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-07T13:27:13,118 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2d7944b2{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2112764b-01a2-c5c5-cacb-7511cece2d5d/hadoop.log.dir/,STOPPED} 2024-12-07T13:27:13,121 WARN [BP-2093863607-172.17.0.3-1733577977871 heartbeating to localhost/127.0.0.1:35389 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-07T13:27:13,121 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-07T13:27:13,121 WARN [BP-2093863607-172.17.0.3-1733577977871 heartbeating to localhost/127.0.0.1:35389 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-2093863607-172.17.0.3-1733577977871 (Datanode Uuid 6007c426-a7fc-4892-9352-97996599d0ac) service to localhost/127.0.0.1:35389 2024-12-07T13:27:13,121 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-07T13:27:13,122 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2112764b-01a2-c5c5-cacb-7511cece2d5d/cluster_13aa47e1-501f-0d38-6bb4-0939deb71df6/data/data3/current/BP-2093863607-172.17.0.3-1733577977871 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-07T13:27:13,122 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2112764b-01a2-c5c5-cacb-7511cece2d5d/cluster_13aa47e1-501f-0d38-6bb4-0939deb71df6/data/data4/current/BP-2093863607-172.17.0.3-1733577977871 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-07T13:27:13,122 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-07T13:27:13,125 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3d40a54d{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-07T13:27:13,125 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@74b5ebca{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-07T13:27:13,125 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-07T13:27:13,125 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@438a440e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-07T13:27:13,125 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@72ef9fa2{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2112764b-01a2-c5c5-cacb-7511cece2d5d/hadoop.log.dir/,STOPPED} 2024-12-07T13:27:13,127 WARN [BP-2093863607-172.17.0.3-1733577977871 heartbeating to localhost/127.0.0.1:35389 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-07T13:27:13,127 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-07T13:27:13,127 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-07T13:27:13,127 WARN [BP-2093863607-172.17.0.3-1733577977871 heartbeating to localhost/127.0.0.1:35389 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-2093863607-172.17.0.3-1733577977871 (Datanode Uuid bf4956d9-ddc8-4f86-a001-0f12b79b7c00) service to localhost/127.0.0.1:35389 2024-12-07T13:27:13,127 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2112764b-01a2-c5c5-cacb-7511cece2d5d/cluster_13aa47e1-501f-0d38-6bb4-0939deb71df6/data/data1/current/BP-2093863607-172.17.0.3-1733577977871 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-07T13:27:13,128 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2112764b-01a2-c5c5-cacb-7511cece2d5d/cluster_13aa47e1-501f-0d38-6bb4-0939deb71df6/data/data2/current/BP-2093863607-172.17.0.3-1733577977871 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-07T13:27:13,128 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-07T13:27:13,134 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@e21aaf2{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-07T13:27:13,134 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6dc3ea71{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-07T13:27:13,134 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-07T13:27:13,134 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7f08894b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-07T13:27:13,134 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3df88721{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2112764b-01a2-c5c5-cacb-7511cece2d5d/hadoop.log.dir/,STOPPED} 2024-12-07T13:27:13,139 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-12-07T13:27:13,155 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-12-07T13:27:13,163 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testCompactionRecordDoesntBlockRolling Thread=205 (was 179) Potentially hanging thread: nioEventLoopGroup-36-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-12-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:35389 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-16 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-37-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-12-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.5@localhost:35389 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-35-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:35389 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-37-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-12-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:35389 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection 
to localhost/127.0.0.1:35389 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:35389 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-34-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-35-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-35-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-37-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-36-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) 
app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-34-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-34-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:35389 from jenkins.hfs.5 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: regionserver/c7c455b68129:0.leaseChecker java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.regionserver.LeaseManager.run(LeaseManager.java:82) Potentially hanging thread: nioEventLoopGroup-36-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:35389 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=483 (was 457) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=134 (was 54) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=15771 (was 15413) - AvailableMemoryMB LEAK? 
- 2024-12-07T13:27:13,170 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRolling Thread=205, OpenFileDescriptor=483, MaxFileDescriptor=1048576, SystemLoadAverage=134, ProcessCount=11, AvailableMemoryMB=15771 2024-12-07T13:27:13,170 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-07T13:27:13,170 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2112764b-01a2-c5c5-cacb-7511cece2d5d/hadoop.log.dir so I do NOT create it in target/test-data/3c88c6db-a566-8e14-e599-3a5f299ea0ab 2024-12-07T13:27:13,170 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2112764b-01a2-c5c5-cacb-7511cece2d5d/hadoop.tmp.dir so I do NOT create it in target/test-data/3c88c6db-a566-8e14-e599-3a5f299ea0ab 2024-12-07T13:27:13,170 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3c88c6db-a566-8e14-e599-3a5f299ea0ab/cluster_c9e402af-1f98-bf37-266b-cc9de78b5940, deleteOnExit=true 2024-12-07T13:27:13,170 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-12-07T13:27:13,170 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3c88c6db-a566-8e14-e599-3a5f299ea0ab/test.cache.data in system properties and HBase conf 2024-12-07T13:27:13,170 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3c88c6db-a566-8e14-e599-3a5f299ea0ab/hadoop.tmp.dir in system properties and HBase conf 2024-12-07T13:27:13,171 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3c88c6db-a566-8e14-e599-3a5f299ea0ab/hadoop.log.dir in system properties and HBase conf 2024-12-07T13:27:13,171 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3c88c6db-a566-8e14-e599-3a5f299ea0ab/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-07T13:27:13,171 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3c88c6db-a566-8e14-e599-3a5f299ea0ab/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-07T13:27:13,171 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-12-07T13:27:13,171 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-12-07T13:27:13,171 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3c88c6db-a566-8e14-e599-3a5f299ea0ab/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-07T13:27:13,171 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3c88c6db-a566-8e14-e599-3a5f299ea0ab/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-07T13:27:13,171 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3c88c6db-a566-8e14-e599-3a5f299ea0ab/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-07T13:27:13,171 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3c88c6db-a566-8e14-e599-3a5f299ea0ab/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-07T13:27:13,171 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3c88c6db-a566-8e14-e599-3a5f299ea0ab/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-07T13:27:13,171 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3c88c6db-a566-8e14-e599-3a5f299ea0ab/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-07T13:27:13,171 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3c88c6db-a566-8e14-e599-3a5f299ea0ab/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-07T13:27:13,171 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3c88c6db-a566-8e14-e599-3a5f299ea0ab/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-07T13:27:13,171 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3c88c6db-a566-8e14-e599-3a5f299ea0ab/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-07T13:27:13,171 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3c88c6db-a566-8e14-e599-3a5f299ea0ab/nfs.dump.dir in system properties and HBase conf 2024-12-07T13:27:13,171 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3c88c6db-a566-8e14-e599-3a5f299ea0ab/java.io.tmpdir in system properties and HBase conf 2024-12-07T13:27:13,171 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3c88c6db-a566-8e14-e599-3a5f299ea0ab/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-07T13:27:13,171 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3c88c6db-a566-8e14-e599-3a5f299ea0ab/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-07T13:27:13,172 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3c88c6db-a566-8e14-e599-3a5f299ea0ab/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-07T13:27:13,184 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-07T13:27:13,293 INFO [regionserver/c7c455b68129:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-07T13:27:13,554 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-07T13:27:13,557 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-07T13:27:13,558 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-07T13:27:13,558 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-07T13:27:13,559 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-07T13:27:13,559 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-07T13:27:13,559 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@75d9b484{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3c88c6db-a566-8e14-e599-3a5f299ea0ab/hadoop.log.dir/,AVAILABLE} 2024-12-07T13:27:13,560 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@516e643a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-07T13:27:13,601 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,42863,1733577894737/c7c455b68129%2C42863%2C1733577894737.1733577894972 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:27:13,601 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,43699,1733577893481/c7c455b68129%2C43699%2C1733577893481.meta.1733577894498.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:27:13,650 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5db25599{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3c88c6db-a566-8e14-e599-3a5f299ea0ab/java.io.tmpdir/jetty-localhost-44855-hadoop-hdfs-3_4_1-tests_jar-_-any-17460042857301942505/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-07T13:27:13,651 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@4f8818bb{HTTP/1.1, (http/1.1)}{localhost:44855} 2024-12-07T13:27:13,651 INFO [Time-limited test {}] server.Server(415): Started @251491ms 2024-12-07T13:27:13,661 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-07T13:27:13,945 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-07T13:27:13,948 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-07T13:27:13,948 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-07T13:27:13,948 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-07T13:27:13,948 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-07T13:27:13,949 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@719b1e37{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3c88c6db-a566-8e14-e599-3a5f299ea0ab/hadoop.log.dir/,AVAILABLE} 2024-12-07T13:27:13,949 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3057e5b5{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-07T13:27:14,039 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3523e770{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3c88c6db-a566-8e14-e599-3a5f299ea0ab/java.io.tmpdir/jetty-localhost-44301-hadoop-hdfs-3_4_1-tests_jar-_-any-15592935770418371182/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-07T13:27:14,039 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6f53b1c{HTTP/1.1, (http/1.1)}{localhost:44301} 2024-12-07T13:27:14,039 INFO [Time-limited test {}] server.Server(415): Started @251880ms 2024-12-07T13:27:14,040 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-07T13:27:14,064 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-07T13:27:14,067 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-07T13:27:14,068 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-07T13:27:14,068 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-07T13:27:14,068 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-07T13:27:14,068 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4445ac53{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3c88c6db-a566-8e14-e599-3a5f299ea0ab/hadoop.log.dir/,AVAILABLE} 2024-12-07T13:27:14,068 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4a646ba1{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-07T13:27:14,157 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3dc4994c{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3c88c6db-a566-8e14-e599-3a5f299ea0ab/java.io.tmpdir/jetty-localhost-42853-hadoop-hdfs-3_4_1-tests_jar-_-any-14197521275994254243/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-07T13:27:14,158 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@25f949b{HTTP/1.1, (http/1.1)}{localhost:42853} 2024-12-07T13:27:14,158 INFO [Time-limited test {}] server.Server(415): Started @251998ms 2024-12-07T13:27:14,159 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-07T13:27:14,602 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,43699,1733577893481/c7c455b68129%2C43699%2C1733577893481.meta.1733577894498.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:27:14,602 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,42863,1733577894737/c7c455b68129%2C42863%2C1733577894737.1733577894972 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-07T13:27:15,212 WARN [Thread-1962 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3c88c6db-a566-8e14-e599-3a5f299ea0ab/cluster_c9e402af-1f98-bf37-266b-cc9de78b5940/data/data2/current/BP-622239927-172.17.0.3-1733578033187/current, will proceed with Du for space computation calculation, 2024-12-07T13:27:15,212 WARN [Thread-1961 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3c88c6db-a566-8e14-e599-3a5f299ea0ab/cluster_c9e402af-1f98-bf37-266b-cc9de78b5940/data/data1/current/BP-622239927-172.17.0.3-1733578033187/current, will proceed with Du for space computation calculation, 2024-12-07T13:27:15,229 WARN [Thread-1925 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-07T13:27:15,231 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xa3111a99658eb853 with lease ID 0xd2eb1709f9f5ad2f: Processing first storage report for DS-1aa7bdb0-b93e-467c-8690-581895a8f934 from datanode DatanodeRegistration(127.0.0.1:32921, datanodeUuid=99df9b7b-45d0-4690-a3c4-aeca82fe250f, infoPort=39449, infoSecurePort=0, ipcPort=46523, storageInfo=lv=-57;cid=testClusterID;nsid=1221027508;c=1733578033187) 2024-12-07T13:27:15,231 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xa3111a99658eb853 with lease ID 0xd2eb1709f9f5ad2f: from storage DS-1aa7bdb0-b93e-467c-8690-581895a8f934 node DatanodeRegistration(127.0.0.1:32921, datanodeUuid=99df9b7b-45d0-4690-a3c4-aeca82fe250f, infoPort=39449, infoSecurePort=0, ipcPort=46523, storageInfo=lv=-57;cid=testClusterID;nsid=1221027508;c=1733578033187), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-07T13:27:15,231 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xa3111a99658eb853 with lease ID 0xd2eb1709f9f5ad2f: Processing first storage report for DS-05d73682-c389-4f12-8fcb-06cc0d0837d7 from datanode DatanodeRegistration(127.0.0.1:32921, datanodeUuid=99df9b7b-45d0-4690-a3c4-aeca82fe250f, infoPort=39449, infoSecurePort=0, ipcPort=46523, storageInfo=lv=-57;cid=testClusterID;nsid=1221027508;c=1733578033187) 2024-12-07T13:27:15,231 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xa3111a99658eb853 with lease ID 0xd2eb1709f9f5ad2f: from storage DS-05d73682-c389-4f12-8fcb-06cc0d0837d7 node DatanodeRegistration(127.0.0.1:32921, datanodeUuid=99df9b7b-45d0-4690-a3c4-aeca82fe250f, infoPort=39449, infoSecurePort=0, ipcPort=46523, storageInfo=lv=-57;cid=testClusterID;nsid=1221027508;c=1733578033187), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-07T13:27:15,324 WARN [Thread-1972 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3c88c6db-a566-8e14-e599-3a5f299ea0ab/cluster_c9e402af-1f98-bf37-266b-cc9de78b5940/data/data3/current/BP-622239927-172.17.0.3-1733578033187/current, will proceed with Du for space computation calculation, 2024-12-07T13:27:15,324 WARN [Thread-1973 {}] impl.BlockPoolSlice(347): dfsUsed file missing in 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3c88c6db-a566-8e14-e599-3a5f299ea0ab/cluster_c9e402af-1f98-bf37-266b-cc9de78b5940/data/data4/current/BP-622239927-172.17.0.3-1733578033187/current, will proceed with Du for space computation calculation, 2024-12-07T13:27:15,337 WARN [Thread-1948 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-07T13:27:15,339 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x55ff208a8e631815 with lease ID 0xd2eb1709f9f5ad30: Processing first storage report for DS-3bbc5a4e-be02-4649-aa75-63cdd11c41ee from datanode DatanodeRegistration(127.0.0.1:34771, datanodeUuid=11bf8af6-d51f-452a-b8a3-d1ac17aa06ee, infoPort=39113, infoSecurePort=0, ipcPort=45593, storageInfo=lv=-57;cid=testClusterID;nsid=1221027508;c=1733578033187) 2024-12-07T13:27:15,339 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x55ff208a8e631815 with lease ID 0xd2eb1709f9f5ad30: from storage DS-3bbc5a4e-be02-4649-aa75-63cdd11c41ee node DatanodeRegistration(127.0.0.1:34771, datanodeUuid=11bf8af6-d51f-452a-b8a3-d1ac17aa06ee, infoPort=39113, infoSecurePort=0, ipcPort=45593, storageInfo=lv=-57;cid=testClusterID;nsid=1221027508;c=1733578033187), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-07T13:27:15,339 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x55ff208a8e631815 with lease ID 0xd2eb1709f9f5ad30: Processing first storage report for DS-250c35f4-b10a-4e40-80e0-ef0101bea835 from datanode DatanodeRegistration(127.0.0.1:34771, datanodeUuid=11bf8af6-d51f-452a-b8a3-d1ac17aa06ee, infoPort=39113, infoSecurePort=0, ipcPort=45593, storageInfo=lv=-57;cid=testClusterID;nsid=1221027508;c=1733578033187) 2024-12-07T13:27:15,339 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x55ff208a8e631815 with lease ID 0xd2eb1709f9f5ad30: from storage DS-250c35f4-b10a-4e40-80e0-ef0101bea835 node DatanodeRegistration(127.0.0.1:34771, datanodeUuid=11bf8af6-d51f-452a-b8a3-d1ac17aa06ee, infoPort=39113, infoSecurePort=0, ipcPort=45593, storageInfo=lv=-57;cid=testClusterID;nsid=1221027508;c=1733578033187), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-07T13:27:15,388 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3c88c6db-a566-8e14-e599-3a5f299ea0ab 2024-12-07T13:27:15,391 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3c88c6db-a566-8e14-e599-3a5f299ea0ab/cluster_c9e402af-1f98-bf37-266b-cc9de78b5940/zookeeper_0, clientPort=64288, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3c88c6db-a566-8e14-e599-3a5f299ea0ab/cluster_c9e402af-1f98-bf37-266b-cc9de78b5940/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3c88c6db-a566-8e14-e599-3a5f299ea0ab/cluster_c9e402af-1f98-bf37-266b-cc9de78b5940/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, 
maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-07T13:27:15,391 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=64288 2024-12-07T13:27:15,391 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T13:27:15,393 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T13:27:15,400 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34771 is added to blk_1073741825_1001 (size=7) 2024-12-07T13:27:15,400 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32921 is added to blk_1073741825_1001 (size=7) 2024-12-07T13:27:15,401 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f with version=8 2024-12-07T13:27:15,401 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:33723/user/jenkins/test-data/3eb2eb4e-7df3-dd45-b018-332cf885bdba/hbase-staging 2024-12-07T13:27:15,403 INFO [Time-limited test {}] client.ConnectionUtils(128): master/c7c455b68129:0 server-side Connection retries=45 2024-12-07T13:27:15,403 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-07T13:27:15,403 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-07T13:27:15,403 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-07T13:27:15,403 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-07T13:27:15,403 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-07T13:27:15,403 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-12-07T13:27:15,403 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-07T13:27:15,404 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:38977 2024-12-07T13:27:15,405 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:38977 connecting to ZooKeeper ensemble=127.0.0.1:64288 2024-12-07T13:27:15,481 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:389770x0, quorum=127.0.0.1:64288, baseZNode=/hbase Received ZooKeeper Event, type=None, 
state=SyncConnected, path=null 2024-12-07T13:27:15,482 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:38977-0x100007597840000 connected 2024-12-07T13:27:15,570 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T13:27:15,573 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T13:27:15,576 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:38977-0x100007597840000, quorum=127.0.0.1:64288, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-07T13:27:15,577 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f, hbase.cluster.distributed=false 2024-12-07T13:27:15,578 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:38977-0x100007597840000, quorum=127.0.0.1:64288, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-07T13:27:15,579 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=38977 2024-12-07T13:27:15,579 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=38977 2024-12-07T13:27:15,579 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=38977 2024-12-07T13:27:15,580 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=38977 2024-12-07T13:27:15,580 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=38977 2024-12-07T13:27:15,593 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/c7c455b68129:0 server-side Connection retries=45 2024-12-07T13:27:15,594 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-07T13:27:15,594 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-07T13:27:15,594 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-07T13:27:15,594 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-07T13:27:15,594 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-07T13:27:15,594 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-07T13:27:15,594 INFO 
[Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-07T13:27:15,594 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:34335 2024-12-07T13:27:15,595 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:34335 connecting to ZooKeeper ensemble=127.0.0.1:64288 2024-12-07T13:27:15,596 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T13:27:15,597 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T13:27:15,602 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,43699,1733577893481/c7c455b68129%2C43699%2C1733577893481.meta.1733577894498.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-07T13:27:15,602 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,42863,1733577894737/c7c455b68129%2C42863%2C1733577894737.1733577894972 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-07T13:27:15,609 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:343350x0, quorum=127.0.0.1:64288, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-07T13:27:15,610 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:34335-0x100007597840001 connected 2024-12-07T13:27:15,610 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:34335-0x100007597840001, quorum=127.0.0.1:64288, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-07T13:27:15,610 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-07T13:27:15,610 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-07T13:27:15,611 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:34335-0x100007597840001, quorum=127.0.0.1:64288, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-07T13:27:15,612 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:34335-0x100007597840001, quorum=127.0.0.1:64288, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-07T13:27:15,612 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=34335 2024-12-07T13:27:15,612 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=34335 2024-12-07T13:27:15,613 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=34335 2024-12-07T13:27:15,613 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=34335 2024-12-07T13:27:15,614 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=34335 2024-12-07T13:27:15,627 DEBUG [M:0;c7c455b68129:38977 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;c7c455b68129:38977 2024-12-07T13:27:15,627 INFO [master/c7c455b68129:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/c7c455b68129,38977,1733578035403 2024-12-07T13:27:15,633 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34335-0x100007597840001, quorum=127.0.0.1:64288, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-07T13:27:15,633 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38977-0x100007597840000, quorum=127.0.0.1:64288, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-07T13:27:15,633 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:38977-0x100007597840000, quorum=127.0.0.1:64288, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/c7c455b68129,38977,1733578035403 2024-12-07T13:27:15,643 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34335-0x100007597840001, quorum=127.0.0.1:64288, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-07T13:27:15,643 DEBUG 
[Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34335-0x100007597840001, quorum=127.0.0.1:64288, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T13:27:15,643 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38977-0x100007597840000, quorum=127.0.0.1:64288, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T13:27:15,644 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:38977-0x100007597840000, quorum=127.0.0.1:64288, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-07T13:27:15,644 INFO [master/c7c455b68129:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/c7c455b68129,38977,1733578035403 from backup master directory 2024-12-07T13:27:15,654 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34335-0x100007597840001, quorum=127.0.0.1:64288, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-07T13:27:15,654 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38977-0x100007597840000, quorum=127.0.0.1:64288, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/c7c455b68129,38977,1733578035403 2024-12-07T13:27:15,654 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38977-0x100007597840000, quorum=127.0.0.1:64288, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-07T13:27:15,654 WARN [master/c7c455b68129:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
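The znode traffic above is HBase's active-master election: the master parks an ephemeral entry under /hbase/backup-masters, sets a watch on /hbase/master, claims it, and then deletes its backup entry, which is what the NodeDeleted and NodeChildrenChanged events record. A minimal sketch of that pattern with the plain ZooKeeper client follows; the quorum address, znode paths, and server name are copied from the log purely for illustration, and the simplified error handling is an assumption rather than the actual ActiveMasterManager code.

import java.nio.charset.StandardCharsets;

import org.apache.zookeeper.CreateMode;
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.ZooDefs;
import org.apache.zookeeper.ZooKeeper;

/**
 * Minimal sketch of the backup-master / active-master znode sequence visible above.
 * Paths and the server name mirror the log for illustration only.
 */
public class ActiveMasterElectionSketch {
  public static void main(String[] args) throws Exception {
    // Connect to the quorum printed in the log; the no-op watcher keeps the sketch short.
    ZooKeeper zk = new ZooKeeper("127.0.0.1:64288", 30000, event -> { });
    String serverName = "c7c455b68129,38977,1733578035403";
    byte[] data = serverName.getBytes(StandardCharsets.UTF_8);

    // 1. Park under /hbase/backup-masters (ephemeral: the entry vanishes if this JVM dies).
    //    The parent znodes are assumed to exist already, as they do in the logged cluster.
    String backupZNode = "/hbase/backup-masters/" + serverName;
    zk.create(backupZNode, data, ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);

    try {
      // 2. Attempt to claim /hbase/master; exactly one contender wins this create().
      zk.create("/hbase/master", data, ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);

      // 3. Active now: remove our entry from the backup-master directory, as the log shows.
      zk.delete(backupZNode, -1);
      System.out.println("Registered as active master=" + serverName);
    } catch (KeeperException.NodeExistsException e) {
      // Another master is already active; keep a watch on /hbase/master and wait.
      zk.exists("/hbase/master", true);
      System.out.println("Waiting as backup master");
    }
  }
}

Because both znodes are ephemeral, a crashed master releases /hbase/master automatically and one of the parked backup masters can win the next create() attempt.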
2024-12-07T13:27:15,654 INFO [master/c7c455b68129:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=c7c455b68129,38977,1733578035403 2024-12-07T13:27:15,658 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/hbase.id] with ID: 05534688-1dae-48e6-a34e-cc3051ca6ca6 2024-12-07T13:27:15,658 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/.tmp/hbase.id 2024-12-07T13:27:15,662 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32921 is added to blk_1073741826_1002 (size=42) 2024-12-07T13:27:15,663 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34771 is added to blk_1073741826_1002 (size=42) 2024-12-07T13:27:15,663 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/.tmp/hbase.id]:[hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/hbase.id] 2024-12-07T13:27:15,674 INFO [master/c7c455b68129:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T13:27:15,674 INFO [master/c7c455b68129:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-12-07T13:27:15,675 INFO [master/c7c455b68129:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
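The hbase.id entries just above show the cluster ID being written to a temporary path under .tmp and then moved onto its final name, so a concurrently starting process never reads a partially written file. The sketch below reproduces that write-then-rename pattern with the public Hadoop FileSystem API; the NameNode address, paths, and UUID are taken from the log for illustration, and this is a simplified stand-in for FSUtils rather than its actual implementation.

import java.net.URI;
import java.nio.charset.StandardCharsets;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

/** Sketch: publish a small metadata file atomically via a temporary write plus rename. */
public class ClusterIdWriteSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:37523"), conf);

    Path rootDir = new Path("/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f");
    Path tmpId   = new Path(rootDir, ".tmp/hbase.id"); // temporary location, not read by anyone
    Path finalId = new Path(rootDir, "hbase.id");      // published location

    // Write the ID where readers will not look for it yet.
    try (FSDataOutputStream out = fs.create(tmpId, true)) {
      out.write("05534688-1dae-48e6-a34e-cc3051ca6ca6".getBytes(StandardCharsets.UTF_8));
    }

    // Rename is the publication step.
    if (!fs.rename(tmpId, finalId)) {
      throw new java.io.IOException("Failed to move " + tmpId + " to " + finalId);
    }
    System.out.println("Cluster ID file created at " + finalId);
  }
}

On HDFS a rename within the same namespace is atomic at the NameNode, which is what makes the temporary-file step safe.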
2024-12-07T13:27:15,685 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38977-0x100007597840000, quorum=127.0.0.1:64288, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T13:27:15,685 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34335-0x100007597840001, quorum=127.0.0.1:64288, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T13:27:15,690 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32921 is added to blk_1073741827_1003 (size=196) 2024-12-07T13:27:15,691 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34771 is added to blk_1073741827_1003 (size=196) 2024-12-07T13:27:15,691 INFO [master/c7c455b68129:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-07T13:27:15,692 INFO [master/c7c455b68129:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-07T13:27:15,692 INFO [master/c7c455b68129:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-07T13:27:15,698 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34771 is added to blk_1073741828_1004 (size=1189) 2024-12-07T13:27:15,699 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32921 is added to blk_1073741828_1004 (size=1189) 2024-12-07T13:27:15,700 INFO [master/c7c455b68129:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/MasterData/data/master/store 2024-12-07T13:27:15,708 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32921 is added to blk_1073741829_1005 (size=34) 2024-12-07T13:27:15,709 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34771 is added to blk_1073741829_1005 (size=34) 2024-12-07T13:27:15,709 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T13:27:15,709 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-07T13:27:15,709 INFO [master/c7c455b68129:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-07T13:27:15,709 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-07T13:27:15,710 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-07T13:27:15,710 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-07T13:27:15,710 INFO [master/c7c455b68129:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
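The 'master:store' descriptor dumped above (column families info, proc, rs, and state, with ROW_INDEX_V1 encoding, an 8 KB block size, a ROWCOL bloom filter, and IN_MEMORY only for info) maps directly onto the public descriptor builders. The sketch below rebuilds a descriptor with the same shape for a hypothetical table name; it only illustrates the builder API and is not the code MasterRegion uses internally.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

/** Sketch: a table descriptor shaped like the master:store descriptor printed in the log. */
public class MasterStoreLikeDescriptorSketch {
  public static void main(String[] args) {
    // 'info': 3 versions, ROW_INDEX_V1 encoding, 8 KB blocks, ROWCOL bloom filter, in-memory.
    ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
        .setMaxVersions(3)
        .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
        .setBlocksize(8 * 1024)
        .setBloomFilterType(BloomType.ROWCOL)
        .setInMemory(true)
        .build();

    // 'proc', 'rs' and 'state' keep the plain defaults shown in the log (1 version, 64 KB blocks).
    ColumnFamilyDescriptor proc = ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("proc")).setMaxVersions(1).build();
    ColumnFamilyDescriptor rs = ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("rs")).setMaxVersions(1).build();
    ColumnFamilyDescriptor state = ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("state")).setMaxVersions(1).build();

    // The table name is hypothetical; master:store itself is created internally by the active master.
    TableDescriptor td = TableDescriptorBuilder.newBuilder(TableName.valueOf("demo", "local_store"))
        .setColumnFamily(info)
        .setColumnFamily(proc)
        .setColumnFamily(rs)
        .setColumnFamily(state)
        .build();

    System.out.println(td);
  }
}

Families that keep the defaults (one version, 64 KB blocks, ROW bloom filter, no encoding) need no extra setters, which is why proc, rs, and state are built with only setMaxVersions here.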
2024-12-07T13:27:15,710 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733578035709Disabling compacts and flushes for region at 1733578035709Disabling writes for close at 1733578035710 (+1 ms)Writing region close event to WAL at 1733578035710Closed at 1733578035710 2024-12-07T13:27:15,710 WARN [master/c7c455b68129:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/MasterData/data/master/store/.initializing 2024-12-07T13:27:15,710 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/MasterData/WALs/c7c455b68129,38977,1733578035403 2024-12-07T13:27:15,712 INFO [master/c7c455b68129:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=c7c455b68129%2C38977%2C1733578035403, suffix=, logDir=hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/MasterData/WALs/c7c455b68129,38977,1733578035403, archiveDir=hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/MasterData/oldWALs, maxLogs=10 2024-12-07T13:27:15,713 INFO [master/c7c455b68129:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor c7c455b68129%2C38977%2C1733578035403.1733578035713 2024-12-07T13:27:15,717 INFO [master/c7c455b68129:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/MasterData/WALs/c7c455b68129,38977,1733578035403/c7c455b68129%2C38977%2C1733578035403.1733578035713 2024-12-07T13:27:15,718 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39113:39113),(127.0.0.1/127.0.0.1:39449:39449)] 2024-12-07T13:27:15,719 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-07T13:27:15,719 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T13:27:15,719 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-07T13:27:15,719 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-07T13:27:15,720 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-07T13:27:15,721 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-07T13:27:15,721 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T13:27:15,721 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T13:27:15,722 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-07T13:27:15,723 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-07T13:27:15,723 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T13:27:15,723 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T13:27:15,723 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-07T13:27:15,724 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-07T13:27:15,724 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T13:27:15,724 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T13:27:15,724 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-07T13:27:15,725 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-07T13:27:15,725 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T13:27:15,726 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T13:27:15,726 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-07T13:27:15,726 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-07T13:27:15,727 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-07T13:27:15,728 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-07T13:27:15,728 DEBUG [master/c7c455b68129:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-07T13:27:15,728 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-07T13:27:15,729 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-07T13:27:15,731 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-07T13:27:15,732 INFO [master/c7c455b68129:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=796077, jitterRate=0.012265235185623169}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-07T13:27:15,732 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733578035719Initializing all the Stores at 1733578035720 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733578035720Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733578035720Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733578035720Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733578035720Cleaning up temporary data from old regions at 1733578035728 (+8 ms)Region opened successfully at 1733578035732 (+4 ms) 2024-12-07T13:27:15,732 INFO [master/c7c455b68129:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-07T13:27:15,735 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@647c1d83, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=c7c455b68129/172.17.0.3:0 2024-12-07T13:27:15,736 INFO [master/c7c455b68129:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-12-07T13:27:15,736 INFO [master/c7c455b68129:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-07T13:27:15,736 INFO [master/c7c455b68129:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-07T13:27:15,736 INFO [master/c7c455b68129:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-07T13:27:15,736 INFO [master/c7c455b68129:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-12-07T13:27:15,736 INFO [master/c7c455b68129:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-12-07T13:27:15,736 INFO [master/c7c455b68129:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-07T13:27:15,738 INFO [master/c7c455b68129:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-07T13:27:15,739 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38977-0x100007597840000, quorum=127.0.0.1:64288, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-07T13:27:15,748 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-07T13:27:15,749 INFO [master/c7c455b68129:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-07T13:27:15,749 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38977-0x100007597840000, quorum=127.0.0.1:64288, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-07T13:27:15,759 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-07T13:27:15,759 INFO [master/c7c455b68129:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-07T13:27:15,760 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38977-0x100007597840000, quorum=127.0.0.1:64288, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-07T13:27:15,769 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-07T13:27:15,770 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38977-0x100007597840000, quorum=127.0.0.1:64288, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-07T13:27:15,780 DEBUG 
[master/c7c455b68129:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-07T13:27:15,782 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38977-0x100007597840000, quorum=127.0.0.1:64288, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-07T13:27:15,790 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-07T13:27:15,801 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38977-0x100007597840000, quorum=127.0.0.1:64288, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-07T13:27:15,801 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34335-0x100007597840001, quorum=127.0.0.1:64288, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-07T13:27:15,801 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38977-0x100007597840000, quorum=127.0.0.1:64288, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T13:27:15,801 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34335-0x100007597840001, quorum=127.0.0.1:64288, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T13:27:15,802 INFO [master/c7c455b68129:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=c7c455b68129,38977,1733578035403, sessionid=0x100007597840000, setting cluster-up flag (Was=false) 2024-12-07T13:27:15,822 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38977-0x100007597840000, quorum=127.0.0.1:64288, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T13:27:15,822 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34335-0x100007597840001, quorum=127.0.0.1:64288, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T13:27:15,854 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-07T13:27:15,856 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=c7c455b68129,38977,1733578035403 2024-12-07T13:27:15,875 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38977-0x100007597840000, quorum=127.0.0.1:64288, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T13:27:15,875 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34335-0x100007597840001, quorum=127.0.0.1:64288, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T13:27:15,906 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-07T13:27:15,908 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=c7c455b68129,38977,1733578035403 2024-12-07T13:27:15,909 INFO [master/c7c455b68129:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-07T13:27:15,910 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-07T13:27:15,911 INFO [master/c7c455b68129:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-07T13:27:15,911 INFO [master/c7c455b68129:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-12-07T13:27:15,911 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: c7c455b68129,38977,1733578035403 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-07T13:27:15,912 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/c7c455b68129:0, corePoolSize=5, maxPoolSize=5 2024-12-07T13:27:15,912 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/c7c455b68129:0, corePoolSize=5, maxPoolSize=5 2024-12-07T13:27:15,912 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/c7c455b68129:0, corePoolSize=5, maxPoolSize=5 2024-12-07T13:27:15,913 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/c7c455b68129:0, corePoolSize=5, maxPoolSize=5 2024-12-07T13:27:15,913 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/c7c455b68129:0, corePoolSize=10, maxPoolSize=10 2024-12-07T13:27:15,913 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/c7c455b68129:0, corePoolSize=1, maxPoolSize=1 2024-12-07T13:27:15,913 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/c7c455b68129:0, corePoolSize=2, maxPoolSize=2 2024-12-07T13:27:15,913 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/c7c455b68129:0, corePoolSize=1, 
maxPoolSize=1 2024-12-07T13:27:15,914 INFO [master/c7c455b68129:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733578065914 2024-12-07T13:27:15,914 INFO [master/c7c455b68129:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-07T13:27:15,914 INFO [master/c7c455b68129:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-07T13:27:15,914 INFO [master/c7c455b68129:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-07T13:27:15,914 INFO [master/c7c455b68129:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-07T13:27:15,914 INFO [master/c7c455b68129:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-07T13:27:15,915 INFO [master/c7c455b68129:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-07T13:27:15,915 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-07T13:27:15,915 INFO [master/c7c455b68129:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-07T13:27:15,915 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-07T13:27:15,915 INFO [master/c7c455b68129:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-07T13:27:15,915 INFO [master/c7c455b68129:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-07T13:27:15,915 INFO [master/c7c455b68129:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-07T13:27:15,915 INFO [RS:0;c7c455b68129:34335 {}] regionserver.HRegionServer(746): ClusterId : 05534688-1dae-48e6-a34e-cc3051ca6ca6 2024-12-07T13:27:15,915 DEBUG [RS:0;c7c455b68129:34335 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-07T13:27:15,915 INFO [master/c7c455b68129:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-07T13:27:15,915 INFO [master/c7c455b68129:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-07T13:27:15,916 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/c7c455b68129:0:becomeActiveMaster-HFileCleaner.large.0-1733578035915,5,FailOnTimeoutGroup] 2024-12-07T13:27:15,916 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/c7c455b68129:0:becomeActiveMaster-HFileCleaner.small.0-1733578035916,5,FailOnTimeoutGroup] 2024-12-07T13:27:15,916 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T13:27:15,916 INFO [master/c7c455b68129:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-07T13:27:15,916 INFO [master/c7c455b68129:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-07T13:27:15,916 INFO [master/c7c455b68129:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-07T13:27:15,916 INFO [master/c7c455b68129:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-12-07T13:27:15,916 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-07T13:27:15,924 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34771 is added to blk_1073741831_1007 (size=1321) 2024-12-07T13:27:15,924 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32921 is added to blk_1073741831_1007 (size=1321) 2024-12-07T13:27:15,924 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-07T13:27:15,925 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', 
INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f 2024-12-07T13:27:15,928 DEBUG [RS:0;c7c455b68129:34335 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-07T13:27:15,928 DEBUG [RS:0;c7c455b68129:34335 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-07T13:27:15,930 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34771 is added to blk_1073741832_1008 (size=32) 2024-12-07T13:27:15,930 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32921 is added to blk_1073741832_1008 (size=32) 2024-12-07T13:27:15,931 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T13:27:15,932 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-07T13:27:15,933 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-07T13:27:15,933 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
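
The FSTableDescriptors/HRegion entries above dump the hbase:meta table descriptor with its four column families in HBase's attribute-map format. As a reading aid only, the following is a minimal Java sketch, not taken from this test, showing how the 'info' family attributes printed above (VERSIONS => '3', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', BLOCKSIZE => '8192 B (8KB)') would be expressed with the public ColumnFamilyDescriptorBuilder / TableDescriptorBuilder client API; the class and table names in the sketch are hypothetical placeholders.

    // Minimal sketch (not part of the test): expresses the 'info' family attributes
    // logged above via the public HBase client builders. Class/table names are
    // hypothetical placeholders.
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public final class InfoFamilySketch {
      public static TableDescriptor build() {
        ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)                                     // VERSIONS => '3'
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)  // DATA_BLOCK_ENCODING => 'ROW_INDEX_V1'
            .setBloomFilterType(BloomType.ROWCOL)                  // BLOOMFILTER => 'ROWCOL'
            .setInMemory(true)                                     // IN_MEMORY => 'true'
            .setBlocksize(8192)                                    // BLOCKSIZE => '8192 B (8KB)'
            .build();
        return TableDescriptorBuilder
            .newBuilder(TableName.valueOf("ExampleTable"))         // hypothetical table name
            .setColumnFamily(info)
            .build();
      }
    }
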
2024-12-07T13:27:15,933 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T13:27:15,933 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-07T13:27:15,935 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-07T13:27:15,935 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T13:27:15,935 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T13:27:15,935 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-07T13:27:15,936 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-07T13:27:15,936 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T13:27:15,937 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T13:27:15,937 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, 
cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-07T13:27:15,938 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-07T13:27:15,938 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T13:27:15,938 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T13:27:15,938 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-07T13:27:15,939 DEBUG [RS:0;c7c455b68129:34335 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-07T13:27:15,939 DEBUG [RS:0;c7c455b68129:34335 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@93ca308, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=c7c455b68129/172.17.0.3:0 2024-12-07T13:27:15,939 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/hbase/meta/1588230740 2024-12-07T13:27:15,939 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/hbase/meta/1588230740 2024-12-07T13:27:15,941 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-07T13:27:15,941 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-07T13:27:15,942 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
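
The CompactionConfiguration entries above report the effective compaction tuning for each hbase:meta family (minCompactSize 128 MB, minFilesToCompact 3, maxFilesToCompact 10, ratio 1.2, off-peak ratio 5.0). Below is a rough sketch, assuming the standard hbase.hstore.compaction.* configuration keys, of how those same values would be set explicitly in a Configuration; the key-to-value mapping is illustrative and is not a statement about what this test actually configures.

    // Rough sketch (assumption: standard hbase.hstore.compaction.* keys); mirrors
    // the values reported by CompactionConfiguration above. Not taken from the test.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public final class CompactionTuningSketch {
      public static Configuration build() {
        Configuration conf = HBaseConfiguration.create();
        conf.setLong("hbase.hstore.compaction.min.size", 128L * 1024 * 1024); // minCompactSize:128 MB
        conf.setInt("hbase.hstore.compaction.min", 3);                        // minFilesToCompact:3
        conf.setInt("hbase.hstore.compaction.max", 10);                       // maxFilesToCompact:10
        conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);                 // ratio 1.200000
        conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);         // off-peak ratio 5.000000
        return conf;
      }
    }
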
2024-12-07T13:27:15,943 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-07T13:27:15,946 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-07T13:27:15,946 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=805172, jitterRate=0.02382996678352356}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-07T13:27:15,947 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733578035931Initializing all the Stores at 1733578035931Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733578035931Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733578035931Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733578035931Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733578035931Cleaning up temporary data from old regions at 1733578035941 (+10 ms)Region opened successfully at 1733578035947 (+6 ms) 2024-12-07T13:27:15,947 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-07T13:27:15,947 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-07T13:27:15,947 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-07T13:27:15,947 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-07T13:27:15,947 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-07T13:27:15,948 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-07T13:27:15,948 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733578035947Disabling compacts and flushes for region at 1733578035947Disabling writes for close at 1733578035947Writing region close event 
to WAL at 1733578035947Closed at 1733578035947 2024-12-07T13:27:15,949 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-07T13:27:15,949 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-07T13:27:15,949 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-07T13:27:15,950 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-07T13:27:15,951 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-07T13:27:15,952 DEBUG [RS:0;c7c455b68129:34335 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;c7c455b68129:34335 2024-12-07T13:27:15,953 INFO [RS:0;c7c455b68129:34335 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-07T13:27:15,953 INFO [RS:0;c7c455b68129:34335 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-07T13:27:15,953 DEBUG [RS:0;c7c455b68129:34335 {}] regionserver.HRegionServer(832): About to register with Master. 2024-12-07T13:27:15,953 INFO [RS:0;c7c455b68129:34335 {}] regionserver.HRegionServer(2659): reportForDuty to master=c7c455b68129,38977,1733578035403 with port=34335, startcode=1733578035593 2024-12-07T13:27:15,953 DEBUG [RS:0;c7c455b68129:34335 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-07T13:27:15,955 INFO [HMaster-EventLoopGroup-14-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:36235, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.6 (auth:SIMPLE), service=RegionServerStatusService 2024-12-07T13:27:15,955 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38977 {}] master.ServerManager(363): Checking decommissioned status of RegionServer c7c455b68129,34335,1733578035593 2024-12-07T13:27:15,955 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38977 {}] master.ServerManager(517): Registering regionserver=c7c455b68129,34335,1733578035593 2024-12-07T13:27:15,956 DEBUG [RS:0;c7c455b68129:34335 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f 2024-12-07T13:27:15,956 DEBUG [RS:0;c7c455b68129:34335 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:37523 2024-12-07T13:27:15,956 DEBUG [RS:0;c7c455b68129:34335 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-07T13:27:15,967 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38977-0x100007597840000, quorum=127.0.0.1:64288, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-07T13:27:15,968 DEBUG [RS:0;c7c455b68129:34335 {}] 
zookeeper.ZKUtil(111): regionserver:34335-0x100007597840001, quorum=127.0.0.1:64288, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/c7c455b68129,34335,1733578035593 2024-12-07T13:27:15,968 WARN [RS:0;c7c455b68129:34335 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-07T13:27:15,968 INFO [RS:0;c7c455b68129:34335 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-07T13:27:15,968 DEBUG [RS:0;c7c455b68129:34335 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/WALs/c7c455b68129,34335,1733578035593 2024-12-07T13:27:15,968 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [c7c455b68129,34335,1733578035593] 2024-12-07T13:27:15,971 INFO [RS:0;c7c455b68129:34335 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-07T13:27:15,972 INFO [RS:0;c7c455b68129:34335 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-07T13:27:15,973 INFO [RS:0;c7c455b68129:34335 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-07T13:27:15,973 INFO [RS:0;c7c455b68129:34335 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-07T13:27:15,973 INFO [RS:0;c7c455b68129:34335 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-07T13:27:15,974 INFO [RS:0;c7c455b68129:34335 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-07T13:27:15,974 INFO [RS:0;c7c455b68129:34335 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
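
The ZKUtil/ZKWatcher entries above give the client-visible coordinates of this mini cluster: quorum=127.0.0.1:64288 and baseZNode=/hbase. A minimal sketch, assuming the standard HBase client configuration keys, of how an external client would be pointed at that ensemble; the class name is hypothetical and the port/znode values are simply copied from the entries above.

    // Minimal sketch (not part of the test): points a client Configuration at the
    // ZooKeeper ensemble reported above (quorum=127.0.0.1:64288, baseZNode=/hbase).
    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public final class MiniClusterClientSketch {
      public static Connection connect() throws IOException {
        Configuration conf = HBaseConfiguration.create();
        conf.set("hbase.zookeeper.quorum", "127.0.0.1");
        conf.setInt("hbase.zookeeper.property.clientPort", 64288); // from quorum=127.0.0.1:64288
        conf.set("zookeeper.znode.parent", "/hbase");              // from baseZNode=/hbase
        return ConnectionFactory.createConnection(conf);
      }
    }
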
2024-12-07T13:27:15,974 DEBUG [RS:0;c7c455b68129:34335 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/c7c455b68129:0, corePoolSize=1, maxPoolSize=1 2024-12-07T13:27:15,974 DEBUG [RS:0;c7c455b68129:34335 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/c7c455b68129:0, corePoolSize=1, maxPoolSize=1 2024-12-07T13:27:15,974 DEBUG [RS:0;c7c455b68129:34335 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/c7c455b68129:0, corePoolSize=1, maxPoolSize=1 2024-12-07T13:27:15,974 DEBUG [RS:0;c7c455b68129:34335 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/c7c455b68129:0, corePoolSize=1, maxPoolSize=1 2024-12-07T13:27:15,974 DEBUG [RS:0;c7c455b68129:34335 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/c7c455b68129:0, corePoolSize=1, maxPoolSize=1 2024-12-07T13:27:15,974 DEBUG [RS:0;c7c455b68129:34335 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/c7c455b68129:0, corePoolSize=2, maxPoolSize=2 2024-12-07T13:27:15,974 DEBUG [RS:0;c7c455b68129:34335 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/c7c455b68129:0, corePoolSize=1, maxPoolSize=1 2024-12-07T13:27:15,974 DEBUG [RS:0;c7c455b68129:34335 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/c7c455b68129:0, corePoolSize=1, maxPoolSize=1 2024-12-07T13:27:15,974 DEBUG [RS:0;c7c455b68129:34335 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/c7c455b68129:0, corePoolSize=1, maxPoolSize=1 2024-12-07T13:27:15,974 DEBUG [RS:0;c7c455b68129:34335 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/c7c455b68129:0, corePoolSize=1, maxPoolSize=1 2024-12-07T13:27:15,974 DEBUG [RS:0;c7c455b68129:34335 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/c7c455b68129:0, corePoolSize=1, maxPoolSize=1 2024-12-07T13:27:15,974 DEBUG [RS:0;c7c455b68129:34335 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/c7c455b68129:0, corePoolSize=1, maxPoolSize=1 2024-12-07T13:27:15,974 DEBUG [RS:0;c7c455b68129:34335 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/c7c455b68129:0, corePoolSize=3, maxPoolSize=3 2024-12-07T13:27:15,974 DEBUG [RS:0;c7c455b68129:34335 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/c7c455b68129:0, corePoolSize=3, maxPoolSize=3 2024-12-07T13:27:15,975 INFO [RS:0;c7c455b68129:34335 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-07T13:27:15,975 INFO [RS:0;c7c455b68129:34335 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-07T13:27:15,975 INFO [RS:0;c7c455b68129:34335 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-07T13:27:15,975 INFO [RS:0;c7c455b68129:34335 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 
2024-12-07T13:27:15,975 INFO [RS:0;c7c455b68129:34335 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-07T13:27:15,975 INFO [RS:0;c7c455b68129:34335 {}] hbase.ChoreService(168): Chore ScheduledChore name=c7c455b68129,34335,1733578035593-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-07T13:27:15,990 INFO [RS:0;c7c455b68129:34335 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-07T13:27:15,991 INFO [RS:0;c7c455b68129:34335 {}] hbase.ChoreService(168): Chore ScheduledChore name=c7c455b68129,34335,1733578035593-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-07T13:27:15,991 INFO [RS:0;c7c455b68129:34335 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-07T13:27:15,991 INFO [RS:0;c7c455b68129:34335 {}] regionserver.Replication(171): c7c455b68129,34335,1733578035593 started 2024-12-07T13:27:16,002 INFO [RS:0;c7c455b68129:34335 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-07T13:27:16,002 INFO [RS:0;c7c455b68129:34335 {}] regionserver.HRegionServer(1482): Serving as c7c455b68129,34335,1733578035593, RpcServer on c7c455b68129/172.17.0.3:34335, sessionid=0x100007597840001 2024-12-07T13:27:16,002 DEBUG [RS:0;c7c455b68129:34335 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-07T13:27:16,003 DEBUG [RS:0;c7c455b68129:34335 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager c7c455b68129,34335,1733578035593 2024-12-07T13:27:16,003 DEBUG [RS:0;c7c455b68129:34335 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'c7c455b68129,34335,1733578035593' 2024-12-07T13:27:16,003 DEBUG [RS:0;c7c455b68129:34335 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-07T13:27:16,003 DEBUG [RS:0;c7c455b68129:34335 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-07T13:27:16,003 DEBUG [RS:0;c7c455b68129:34335 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-07T13:27:16,003 DEBUG [RS:0;c7c455b68129:34335 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-07T13:27:16,003 DEBUG [RS:0;c7c455b68129:34335 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager c7c455b68129,34335,1733578035593 2024-12-07T13:27:16,003 DEBUG [RS:0;c7c455b68129:34335 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'c7c455b68129,34335,1733578035593' 2024-12-07T13:27:16,003 DEBUG [RS:0;c7c455b68129:34335 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-07T13:27:16,004 DEBUG [RS:0;c7c455b68129:34335 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-07T13:27:16,004 DEBUG [RS:0;c7c455b68129:34335 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-07T13:27:16,004 INFO [RS:0;c7c455b68129:34335 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-07T13:27:16,004 INFO [RS:0;c7c455b68129:34335 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support 
disabled, not starting space quota manager. 2024-12-07T13:27:16,101 WARN [c7c455b68129:38977 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-12-07T13:27:16,106 INFO [RS:0;c7c455b68129:34335 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=c7c455b68129%2C34335%2C1733578035593, suffix=, logDir=hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/WALs/c7c455b68129,34335,1733578035593, archiveDir=hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/oldWALs, maxLogs=32 2024-12-07T13:27:16,106 INFO [RS:0;c7c455b68129:34335 {}] monitor.StreamSlowMonitor(122): New stream slow monitor c7c455b68129%2C34335%2C1733578035593.1733578036106 2024-12-07T13:27:16,111 INFO [RS:0;c7c455b68129:34335 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/WALs/c7c455b68129,34335,1733578035593/c7c455b68129%2C34335%2C1733578035593.1733578036106 2024-12-07T13:27:16,112 DEBUG [RS:0;c7c455b68129:34335 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39449:39449),(127.0.0.1/127.0.0.1:39113:39113)] 2024-12-07T13:27:16,352 DEBUG [c7c455b68129:38977 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-12-07T13:27:16,353 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=c7c455b68129,34335,1733578035593 2024-12-07T13:27:16,355 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as c7c455b68129,34335,1733578035593, state=OPENING 2024-12-07T13:27:16,370 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-07T13:27:16,444 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38977-0x100007597840000, quorum=127.0.0.1:64288, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T13:27:16,444 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34335-0x100007597840001, quorum=127.0.0.1:64288, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T13:27:16,446 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-07T13:27:16,446 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-07T13:27:16,446 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-07T13:27:16,446 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=c7c455b68129,34335,1733578035593}] 2024-12-07T13:27:16,602 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-07T13:27:16,603 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for 
hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,43699,1733577893481/c7c455b68129%2C43699%2C1733577893481.meta.1733577894498.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:27:16,603 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,42863,1733577894737/c7c455b68129%2C42863%2C1733577894737.1733577894972 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:27:16,605 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:57323, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-07T13:27:16,612 INFO [RS_OPEN_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-07T13:27:16,613 INFO [RS_OPEN_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-07T13:27:16,616 INFO [RS_OPEN_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=c7c455b68129%2C34335%2C1733578035593.meta, suffix=.meta, logDir=hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/WALs/c7c455b68129,34335,1733578035593, archiveDir=hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/oldWALs, maxLogs=32 2024-12-07T13:27:16,616 INFO [RS_OPEN_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor c7c455b68129%2C34335%2C1733578035593.meta.1733578036616.meta 2024-12-07T13:27:16,623 INFO [RS_OPEN_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/WALs/c7c455b68129,34335,1733578035593/c7c455b68129%2C34335%2C1733578035593.meta.1733578036616.meta 2024-12-07T13:27:16,626 DEBUG [RS_OPEN_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39449:39449),(127.0.0.1/127.0.0.1:39113:39113)] 2024-12-07T13:27:16,629 DEBUG [RS_OPEN_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-07T13:27:16,629 DEBUG [RS_OPEN_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-07T13:27:16,629 DEBUG [RS_OPEN_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: 
region=hbase:meta,,1 service=MultiRowMutationService 2024-12-07T13:27:16,629 INFO [RS_OPEN_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-12-07T13:27:16,629 DEBUG [RS_OPEN_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-07T13:27:16,629 DEBUG [RS_OPEN_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T13:27:16,629 DEBUG [RS_OPEN_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-07T13:27:16,629 DEBUG [RS_OPEN_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-07T13:27:16,631 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-07T13:27:16,632 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-07T13:27:16,632 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T13:27:16,632 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T13:27:16,633 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-07T13:27:16,633 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window 
org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-07T13:27:16,633 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T13:27:16,633 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T13:27:16,634 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-07T13:27:16,634 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-07T13:27:16,634 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T13:27:16,634 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T13:27:16,634 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-07T13:27:16,635 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-07T13:27:16,635 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T13:27:16,635 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T13:27:16,636 DEBUG [RS_OPEN_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-07T13:27:16,636 DEBUG [RS_OPEN_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/hbase/meta/1588230740 2024-12-07T13:27:16,637 DEBUG [RS_OPEN_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/hbase/meta/1588230740 2024-12-07T13:27:16,638 DEBUG [RS_OPEN_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-07T13:27:16,638 DEBUG [RS_OPEN_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-07T13:27:16,639 DEBUG [RS_OPEN_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-07T13:27:16,640 DEBUG [RS_OPEN_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-07T13:27:16,641 INFO [RS_OPEN_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=832307, jitterRate=0.058334171772003174}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-07T13:27:16,641 DEBUG [RS_OPEN_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-07T13:27:16,641 DEBUG [RS_OPEN_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733578036629Writing region info on filesystem at 1733578036629Initializing all the Stores at 1733578036630 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733578036630Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B 
(8KB)'} at 1733578036631 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733578036631Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733578036631Cleaning up temporary data from old regions at 1733578036638 (+7 ms)Running coprocessor post-open hooks at 1733578036641 (+3 ms)Region opened successfully at 1733578036641 2024-12-07T13:27:16,642 INFO [RS_OPEN_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733578036601 2024-12-07T13:27:16,644 DEBUG [RS_OPEN_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-07T13:27:16,644 INFO [RS_OPEN_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-07T13:27:16,645 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=c7c455b68129,34335,1733578035593 2024-12-07T13:27:16,645 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as c7c455b68129,34335,1733578035593, state=OPEN 2024-12-07T13:27:16,684 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34335-0x100007597840001, quorum=127.0.0.1:64288, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-07T13:27:16,684 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38977-0x100007597840000, quorum=127.0.0.1:64288, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-07T13:27:16,684 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=c7c455b68129,34335,1733578035593 2024-12-07T13:27:16,684 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-07T13:27:16,684 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-07T13:27:16,688 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-07T13:27:16,688 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=c7c455b68129,34335,1733578035593 in 238 msec 2024-12-07T13:27:16,693 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure 
pid=2, resume processing ppid=1 2024-12-07T13:27:16,693 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 739 msec 2024-12-07T13:27:16,695 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-07T13:27:16,695 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-07T13:27:16,697 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-07T13:27:16,697 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=c7c455b68129,34335,1733578035593, seqNum=-1] 2024-12-07T13:27:16,697 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T13:27:16,699 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:58583, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T13:27:16,705 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 794 msec 2024-12-07T13:27:16,705 INFO [master/c7c455b68129:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733578036705, completionTime=-1 2024-12-07T13:27:16,705 INFO [master/c7c455b68129:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-12-07T13:27:16,705 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-12-07T13:27:16,707 INFO [master/c7c455b68129:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-12-07T13:27:16,707 INFO [master/c7c455b68129:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733578096707 2024-12-07T13:27:16,707 INFO [master/c7c455b68129:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733578156707 2024-12-07T13:27:16,707 INFO [master/c7c455b68129:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec 2024-12-07T13:27:16,707 INFO [master/c7c455b68129:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c7c455b68129,38977,1733578035403-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-07T13:27:16,708 INFO [master/c7c455b68129:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c7c455b68129,38977,1733578035403-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-07T13:27:16,708 INFO [master/c7c455b68129:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c7c455b68129,38977,1733578035403-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 
2024-12-07T13:27:16,708 INFO [master/c7c455b68129:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-c7c455b68129:38977, period=300000, unit=MILLISECONDS is enabled. 2024-12-07T13:27:16,708 INFO [master/c7c455b68129:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-07T13:27:16,708 INFO [master/c7c455b68129:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-12-07T13:27:16,710 DEBUG [master/c7c455b68129:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-07T13:27:16,712 INFO [master/c7c455b68129:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.058sec 2024-12-07T13:27:16,712 INFO [master/c7c455b68129:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-07T13:27:16,712 INFO [master/c7c455b68129:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-07T13:27:16,712 INFO [master/c7c455b68129:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-07T13:27:16,712 INFO [master/c7c455b68129:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-12-07T13:27:16,712 INFO [master/c7c455b68129:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-07T13:27:16,712 INFO [master/c7c455b68129:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c7c455b68129,38977,1733578035403-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-07T13:27:16,713 INFO [master/c7c455b68129:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c7c455b68129,38977,1733578035403-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-07T13:27:16,716 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-07T13:27:16,716 INFO [master/c7c455b68129:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-07T13:27:16,716 INFO [master/c7c455b68129:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c7c455b68129,38977,1733578035403-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
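
The ConnectionUtils entries above ("Start fetching meta region location from registry" / "The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=c7c455b68129,34335,1733578035593, seqNum=-1]") correspond to an ordinary client meta lookup. A minimal sketch of the equivalent public API call, assuming an already-open Connection such as the one in the earlier sketch; the class name is hypothetical.

    // Minimal sketch (not part of the test): the public client call that matches
    // the "fetching meta region location" entries above.
    import java.io.IOException;
    import org.apache.hadoop.hbase.HConstants;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.RegionLocator;

    public final class MetaLocationSketch {
      public static HRegionLocation metaLocation(Connection conn) throws IOException {
        try (RegionLocator locator = conn.getRegionLocator(TableName.META_TABLE_NAME)) {
          // Resolves to something like: region=hbase:meta,,1.1588230740, hostname=..., seqNum=...
          return locator.getRegionLocation(HConstants.EMPTY_START_ROW);
        }
      }
    }
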
2024-12-07T13:27:16,716 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@721248d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T13:27:16,716 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request c7c455b68129,38977,-1 for getting cluster id 2024-12-07T13:27:16,717 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-07T13:27:16,718 DEBUG [HMaster-EventLoopGroup-14-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '05534688-1dae-48e6-a34e-cc3051ca6ca6' 2024-12-07T13:27:16,718 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-07T13:27:16,719 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "05534688-1dae-48e6-a34e-cc3051ca6ca6" 2024-12-07T13:27:16,719 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6f6eae0c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T13:27:16,719 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [c7c455b68129,38977,-1] 2024-12-07T13:27:16,719 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-07T13:27:16,720 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T13:27:16,721 INFO [HMaster-EventLoopGroup-14-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:39524, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-07T13:27:16,722 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3cc27ae2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T13:27:16,722 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-07T13:27:16,723 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=c7c455b68129,34335,1733578035593, seqNum=-1] 2024-12-07T13:27:16,724 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T13:27:16,725 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:51022, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T13:27:16,727 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=c7c455b68129,38977,1733578035403 2024-12-07T13:27:16,727 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T13:27:16,730 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-12-07T13:27:16,730 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-12-07T13:27:16,731 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.AsyncConnectionImpl(321): The fetched master address is c7c455b68129,38977,1733578035403 2024-12-07T13:27:16,731 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@75253dee 2024-12-07T13:27:16,731 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-07T13:27:16,732 INFO [HMaster-EventLoopGroup-14-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:39530, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-07T13:27:16,732 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38977 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-12-07T13:27:16,732 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38977 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 2024-12-07T13:27:16,732 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38977 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.3 create 'TestLogRolling-testLogRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-07T13:27:16,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38977 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRolling 2024-12-07T13:27:16,735 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-12-07T13:27:16,735 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T13:27:16,735 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38977 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.3 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRolling" procId is: 4 2024-12-07T13:27:16,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38977 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-07T13:27:16,736 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-07T13:27:16,742 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34771 is added to blk_1073741835_1011 (size=381) 2024-12-07T13:27:16,743 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32921 is added to blk_1073741835_1011 (size=381) 2024-12-07T13:27:16,745 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 82e88219ec4ec4a4fff2a6f409a36e45, NAME => 'TestLogRolling-testLogRolling,,1733578036732.82e88219ec4ec4a4fff2a6f409a36e45.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f 2024-12-07T13:27:16,752 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34771 is added to blk_1073741836_1012 (size=64) 2024-12-07T13:27:16,752 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32921 is added to blk_1073741836_1012 (size=64) 2024-12-07T13:27:16,752 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,,1733578036732.82e88219ec4ec4a4fff2a6f409a36e45.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T13:27:16,752 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1722): Closing 82e88219ec4ec4a4fff2a6f409a36e45, disabling compactions & flushes 2024-12-07T13:27:16,753 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1733578036732.82e88219ec4ec4a4fff2a6f409a36e45. 2024-12-07T13:27:16,753 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1733578036732.82e88219ec4ec4a4fff2a6f409a36e45. 2024-12-07T13:27:16,753 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1733578036732.82e88219ec4ec4a4fff2a6f409a36e45. after waiting 0 ms 2024-12-07T13:27:16,753 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1733578036732.82e88219ec4ec4a4fff2a6f409a36e45. 2024-12-07T13:27:16,753 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1733578036732.82e88219ec4ec4a4fff2a6f409a36e45. 
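The CreateTableProcedure above was started by a createTable request whose descriptor is printed in full in the HMaster log entry. A roughly equivalent client-side call is sketched below; it mirrors the 'info' family settings from that descriptor (one version, ROW bloom filter, 64 KB blocks), while the connection setup is assumed and the tiny hbase.hregion.max.filesize and memstore flush values that triggered the two TableDescriptorChecker warnings are left to the test's own configuration.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableDescriptor table = TableDescriptorBuilder
          .newBuilder(TableName.valueOf("TestLogRolling-testLogRolling"))
          .setColumnFamily(ColumnFamilyDescriptorBuilder
              .newBuilder(Bytes.toBytes("info"))
              .setMaxVersions(1)                   // VERSIONS => '1'
              .setBloomFilterType(BloomType.ROW)   // BLOOMFILTER => 'ROW'
              .setBlocksize(64 * 1024)             // BLOCKSIZE => '65536 B (64KB)'
              .build())
          .build();
      // Corresponds to the "procedure request for creating table" entry; the call
      // returns once the CreateTableProcedure (pid=4 above) completes.
      admin.createTable(table);
    }
  }
}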
2024-12-07T13:27:16,753 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for 82e88219ec4ec4a4fff2a6f409a36e45: Waiting for close lock at 1733578036752Disabling compacts and flushes for region at 1733578036752Disabling writes for close at 1733578036753 (+1 ms)Writing region close event to WAL at 1733578036753Closed at 1733578036753 2024-12-07T13:27:16,754 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_ADD_TO_META 2024-12-07T13:27:16,754 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRolling,,1733578036732.82e88219ec4ec4a4fff2a6f409a36e45.","families":{"info":[{"qualifier":"regioninfo","vlen":63,"tag":[],"timestamp":"1733578036754"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733578036754"}]},"ts":"1733578036754"} 2024-12-07T13:27:16,756 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 2024-12-07T13:27:16,757 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-07T13:27:16,757 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733578036757"}]},"ts":"1733578036757"} 2024-12-07T13:27:16,759 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRolling, state=ENABLING in hbase:meta 2024-12-07T13:27:16,759 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=82e88219ec4ec4a4fff2a6f409a36e45, ASSIGN}] 2024-12-07T13:27:16,760 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=82e88219ec4ec4a4fff2a6f409a36e45, ASSIGN 2024-12-07T13:27:16,761 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=82e88219ec4ec4a4fff2a6f409a36e45, ASSIGN; state=OFFLINE, location=c7c455b68129,34335,1733578035593; forceNewPlan=false, retain=false 2024-12-07T13:27:16,913 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=82e88219ec4ec4a4fff2a6f409a36e45, regionState=OPENING, regionLocation=c7c455b68129,34335,1733578035593 2024-12-07T13:27:16,918 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=82e88219ec4ec4a4fff2a6f409a36e45, ASSIGN because future has completed 2024-12-07T13:27:16,919 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 82e88219ec4ec4a4fff2a6f409a36e45, 
server=c7c455b68129,34335,1733578035593}] 2024-12-07T13:27:17,082 INFO [RS_OPEN_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,,1733578036732.82e88219ec4ec4a4fff2a6f409a36e45. 2024-12-07T13:27:17,082 DEBUG [RS_OPEN_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 82e88219ec4ec4a4fff2a6f409a36e45, NAME => 'TestLogRolling-testLogRolling,,1733578036732.82e88219ec4ec4a4fff2a6f409a36e45.', STARTKEY => '', ENDKEY => ''} 2024-12-07T13:27:17,082 DEBUG [RS_OPEN_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling 82e88219ec4ec4a4fff2a6f409a36e45 2024-12-07T13:27:17,083 DEBUG [RS_OPEN_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,,1733578036732.82e88219ec4ec4a4fff2a6f409a36e45.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T13:27:17,083 DEBUG [RS_OPEN_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 82e88219ec4ec4a4fff2a6f409a36e45 2024-12-07T13:27:17,083 DEBUG [RS_OPEN_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 82e88219ec4ec4a4fff2a6f409a36e45 2024-12-07T13:27:17,086 INFO [StoreOpener-82e88219ec4ec4a4fff2a6f409a36e45-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 82e88219ec4ec4a4fff2a6f409a36e45 2024-12-07T13:27:17,089 INFO [StoreOpener-82e88219ec4ec4a4fff2a6f409a36e45-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 82e88219ec4ec4a4fff2a6f409a36e45 columnFamilyName info 2024-12-07T13:27:17,089 DEBUG [StoreOpener-82e88219ec4ec4a4fff2a6f409a36e45-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T13:27:17,090 INFO [StoreOpener-82e88219ec4ec4a4fff2a6f409a36e45-1 {}] regionserver.HStore(327): Store=82e88219ec4ec4a4fff2a6f409a36e45/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T13:27:17,090 DEBUG [RS_OPEN_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 82e88219ec4ec4a4fff2a6f409a36e45 2024-12-07T13:27:17,092 DEBUG 
[RS_OPEN_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/82e88219ec4ec4a4fff2a6f409a36e45 2024-12-07T13:27:17,092 DEBUG [RS_OPEN_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/82e88219ec4ec4a4fff2a6f409a36e45 2024-12-07T13:27:17,093 DEBUG [RS_OPEN_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 82e88219ec4ec4a4fff2a6f409a36e45 2024-12-07T13:27:17,093 DEBUG [RS_OPEN_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 82e88219ec4ec4a4fff2a6f409a36e45 2024-12-07T13:27:17,096 DEBUG [RS_OPEN_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 82e88219ec4ec4a4fff2a6f409a36e45 2024-12-07T13:27:17,100 DEBUG [RS_OPEN_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/82e88219ec4ec4a4fff2a6f409a36e45/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-07T13:27:17,100 INFO [RS_OPEN_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 82e88219ec4ec4a4fff2a6f409a36e45; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=851516, jitterRate=0.08275866508483887}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-07T13:27:17,100 DEBUG [RS_OPEN_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 82e88219ec4ec4a4fff2a6f409a36e45 2024-12-07T13:27:17,101 DEBUG [RS_OPEN_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 82e88219ec4ec4a4fff2a6f409a36e45: Running coprocessor pre-open hook at 1733578037083Writing region info on filesystem at 1733578037083Initializing all the Stores at 1733578037085 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733578037085Cleaning up temporary data from old regions at 1733578037093 (+8 ms)Running coprocessor post-open hooks at 1733578037100 (+7 ms)Region opened successfully at 1733578037101 (+1 ms) 2024-12-07T13:27:17,102 INFO [RS_OPEN_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,,1733578036732.82e88219ec4ec4a4fff2a6f409a36e45., pid=6, masterSystemTime=1733578037073 2024-12-07T13:27:17,104 DEBUG [RS_OPEN_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] 
regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,,1733578036732.82e88219ec4ec4a4fff2a6f409a36e45. 2024-12-07T13:27:17,104 INFO [RS_OPEN_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,,1733578036732.82e88219ec4ec4a4fff2a6f409a36e45. 2024-12-07T13:27:17,105 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=82e88219ec4ec4a4fff2a6f409a36e45, regionState=OPEN, openSeqNum=2, regionLocation=c7c455b68129,34335,1733578035593 2024-12-07T13:27:17,108 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 82e88219ec4ec4a4fff2a6f409a36e45, server=c7c455b68129,34335,1733578035593 because future has completed 2024-12-07T13:27:17,112 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-12-07T13:27:17,112 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 82e88219ec4ec4a4fff2a6f409a36e45, server=c7c455b68129,34335,1733578035593 in 190 msec 2024-12-07T13:27:17,116 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-12-07T13:27:17,116 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=82e88219ec4ec4a4fff2a6f409a36e45, ASSIGN in 353 msec 2024-12-07T13:27:17,117 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-07T13:27:17,117 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733578037117"}]},"ts":"1733578037117"} 2024-12-07T13:27:17,120 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRolling, state=ENABLED in hbase:meta 2024-12-07T13:27:17,121 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_POST_OPERATION 2024-12-07T13:27:17,123 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRolling in 389 msec 2024-12-07T13:27:17,582 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T13:27:17,583 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T13:27:17,583 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T13:27:17,583 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T13:27:17,583 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T13:27:17,584 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T13:27:17,584 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T13:27:17,585 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T13:27:17,602 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T13:27:17,602 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T13:27:17,602 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T13:27:17,602 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T13:27:17,602 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T13:27:17,602 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T13:27:17,604 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,42863,1733577894737/c7c455b68129%2C42863%2C1733577894737.1733577894972 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:27:17,604 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,43699,1733577893481/c7c455b68129%2C43699%2C1733577893481.meta.1733577894498.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:27:17,604 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T13:27:17,604 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T13:27:17,605 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T13:27:17,606 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T13:27:18,110 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-07T13:27:18,111 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T13:27:18,111 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T13:27:18,111 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T13:27:18,111 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T13:27:18,112 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T13:27:18,112 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T13:27:18,112 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T13:27:18,112 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T13:27:18,127 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T13:27:18,127 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T13:27:18,127 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T13:27:18,128 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T13:27:18,128 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T13:27:18,128 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T13:27:18,132 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T13:27:18,132 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T13:27:18,132 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T13:27:18,134 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T13:27:18,427 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling 2024-12-07T13:27:18,427 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling Metrics about Tables on a single HBase RegionServer 2024-12-07T13:27:18,428 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-12-07T13:27:18,604 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,42863,1733577894737/c7c455b68129%2C42863%2C1733577894737.1733577894972 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:27:18,605 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,43699,1733577893481/c7c455b68129%2C43699%2C1733577893481.meta.1733577894498.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:27:19,606 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,42863,1733577894737/c7c455b68129%2C42863%2C1733577894737.1733577894972 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:27:19,606 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,43699,1733577893481/c7c455b68129%2C43699%2C1733577893481.meta.1733577894498.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:27:20,606 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,42863,1733577894737/c7c455b68129%2C42863%2C1733577894737.1733577894972 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:27:20,606 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,43699,1733577893481/c7c455b68129%2C43699%2C1733577893481.meta.1733577894498.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:27:21,607 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,42863,1733577894737/c7c455b68129%2C42863%2C1733577894737.1733577894972 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:27:21,607 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,43699,1733577893481/c7c455b68129%2C43699%2C1733577893481.meta.1733577894498.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:27:21,971 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-12-07T13:27:21,972 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRolling' 2024-12-07T13:27:22,609 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,42863,1733577894737/c7c455b68129%2C42863%2C1733577894737.1733577894972 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:27:22,609 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,43699,1733577893481/c7c455b68129%2C43699%2C1733577893481.meta.1733577894498.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:27:23,610 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,42863,1733577894737/c7c455b68129%2C42863%2C1733577894737.1733577894972 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:27:23,610 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,43699,1733577893481/c7c455b68129%2C43699%2C1733577893481.meta.1733577894498.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:27:23,933 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-07T13:27:23,934 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T13:27:23,934 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T13:27:23,935 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T13:27:23,935 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T13:27:23,935 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T13:27:23,936 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T13:27:23,936 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T13:27:23,936 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T13:27:23,954 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T13:27:23,955 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T13:27:23,955 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T13:27:23,955 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T13:27:23,955 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T13:27:23,956 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T13:27:23,959 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T13:27:23,959 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T13:27:23,959 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T13:27:23,961 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T13:27:24,611 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,42863,1733577894737/c7c455b68129%2C42863%2C1733577894737.1733577894972 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:27:24,611 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,43699,1733577893481/c7c455b68129%2C43699%2C1733577893481.meta.1733577894498.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-07T13:27:25,612 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,42863,1733577894737/c7c455b68129%2C42863%2C1733577894737.1733577894972 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:27:25,612 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,43699,1733577893481/c7c455b68129%2C43699%2C1733577893481.meta.1733577894498.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:27:26,614 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,42863,1733577894737/c7c455b68129%2C42863%2C1733577894737.1733577894972 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-07T13:27:26,614 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,43699,1733577893481/c7c455b68129%2C43699%2C1733577893481.meta.1733577894498.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:27:26,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38977 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-07T13:27:26,828 INFO [RPCClient-NioEventLoopGroup-4-7 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRolling completed 2024-12-07T13:27:26,828 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRolling,, stopping at row=TestLogRolling-testLogRolling ,, for max=2147483647 with caching=100 2024-12-07T13:27:26,834 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRolling 2024-12-07T13:27:26,834 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRolling,,1733578036732.82e88219ec4ec4a4fff2a6f409a36e45. 
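The repeated "Failed invocation" warnings above come from the WAL close path probing whether the old WAL file is already closed: per the stack traces, RecoverLeaseFSUtils reaches DistributedFileSystem.isFileClosed(Path) through reflection, and because the DFSClient behind that filesystem has already been shut down, every probe surfaces as an InvocationTargetException wrapping IOException("Filesystem closed") and is retried roughly once per second. The following is only a minimal sketch of that probe pattern, not the HBase source; the names LeaseProbeSketch and probeFileClosed are illustrative assumptions.

import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public final class LeaseProbeSketch {
  /** Returns true only when the filesystem positively reports the file as closed. */
  static boolean probeFileClosed(FileSystem fs, Path wal) {
    try {
      // DistributedFileSystem exposes isFileClosed(Path); the base FileSystem class
      // does not, hence the reflective lookup instead of a direct call.
      Method isFileClosed = fs.getClass().getMethod("isFileClosed", Path.class);
      return (Boolean) isFileClosed.invoke(fs, wal);
    } catch (NoSuchMethodException e) {
      return false; // no isFileClosed on this filesystem; caller must poll lease recovery instead
    } catch (IllegalAccessException | InvocationTargetException e) {
      // A shut-down DFSClient makes the call throw IOException("Filesystem closed"),
      // which reflection wraps in InvocationTargetException: the WARN + stack trace above.
      return false;
    }
  }
}

Under this pattern a permanently closed filesystem never reports the file as closed, which is why the identical stack trace recurs at roughly one-second intervals in the records above.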
2024-12-07T13:27:26,838 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testLogRolling,,1733578036732.82e88219ec4ec4a4fff2a6f409a36e45., hostname=c7c455b68129,34335,1733578035593, seqNum=2] 2024-12-07T13:27:26,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34335 {}] regionserver.HRegion(8855): Flush requested on 82e88219ec4ec4a4fff2a6f409a36e45 2024-12-07T13:27:26,853 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 82e88219ec4ec4a4fff2a6f409a36e45 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-07T13:27:26,874 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/82e88219ec4ec4a4fff2a6f409a36e45/.tmp/info/97735dcba6434a1a8022c16e6b479784 is 1080, key is row0001/info:/1733578046839/Put/seqid=0 2024-12-07T13:27:26,878 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32921 is added to blk_1073741837_1013 (size=12509) 2024-12-07T13:27:26,878 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34771 is added to blk_1073741837_1013 (size=12509) 2024-12-07T13:27:26,881 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/82e88219ec4ec4a4fff2a6f409a36e45/.tmp/info/97735dcba6434a1a8022c16e6b479784 2024-12-07T13:27:26,887 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/82e88219ec4ec4a4fff2a6f409a36e45/.tmp/info/97735dcba6434a1a8022c16e6b479784 as hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/82e88219ec4ec4a4fff2a6f409a36e45/info/97735dcba6434a1a8022c16e6b479784 2024-12-07T13:27:26,892 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/82e88219ec4ec4a4fff2a6f409a36e45/info/97735dcba6434a1a8022c16e6b479784, entries=7, sequenceid=11, filesize=12.2 K 2024-12-07T13:27:26,893 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=19.96 KB/20444 for 82e88219ec4ec4a4fff2a6f409a36e45 in 40ms, sequenceid=11, compaction requested=false 2024-12-07T13:27:26,893 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 82e88219ec4ec4a4fff2a6f409a36e45: 2024-12-07T13:27:26,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34335 {}] regionserver.HRegion(8855): Flush requested on 82e88219ec4ec4a4fff2a6f409a36e45 2024-12-07T13:27:26,893 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 82e88219ec4ec4a4fff2a6f409a36e45 1/1 column families, dataSize=21.02 KB heapSize=22.75 KB 2024-12-07T13:27:26,898 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/82e88219ec4ec4a4fff2a6f409a36e45/.tmp/info/30ab9dbc66f14e8099ec2e9bb78cc10c is 1080, key is row0008/info:/1733578046854/Put/seqid=0 2024-12-07T13:27:26,902 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32921 is added to blk_1073741838_1014 (size=26530) 2024-12-07T13:27:26,902 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34771 is added to blk_1073741838_1014 (size=26530) 2024-12-07T13:27:26,903 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=21.02 KB at sequenceid=34 (bloomFilter=true), to=hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/82e88219ec4ec4a4fff2a6f409a36e45/.tmp/info/30ab9dbc66f14e8099ec2e9bb78cc10c 2024-12-07T13:27:26,908 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/82e88219ec4ec4a4fff2a6f409a36e45/.tmp/info/30ab9dbc66f14e8099ec2e9bb78cc10c as hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/82e88219ec4ec4a4fff2a6f409a36e45/info/30ab9dbc66f14e8099ec2e9bb78cc10c 2024-12-07T13:27:26,912 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/82e88219ec4ec4a4fff2a6f409a36e45/info/30ab9dbc66f14e8099ec2e9bb78cc10c, entries=20, sequenceid=34, filesize=25.9 K 2024-12-07T13:27:26,913 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~21.02 KB/21520, heapSize ~22.73 KB/23280, currentSize=5.25 KB/5380 for 82e88219ec4ec4a4fff2a6f409a36e45 in 20ms, sequenceid=34, compaction requested=false 2024-12-07T13:27:26,913 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 82e88219ec4ec4a4fff2a6f409a36e45: 2024-12-07T13:27:26,913 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=38.1 K, sizeToCheck=16.0 K 2024-12-07T13:27:26,913 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-07T13:27:26,913 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/82e88219ec4ec4a4fff2a6f409a36e45/info/30ab9dbc66f14e8099ec2e9bb78cc10c because midkey is the same as first or last row 2024-12-07T13:27:27,615 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,42863,1733577894737/c7c455b68129%2C42863%2C1733577894737.1733577894972 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:27:27,615 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,43699,1733577893481/c7c455b68129%2C43699%2C1733577893481.meta.1733577894498.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:27:28,615 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,42863,1733577894737/c7c455b68129%2C42863%2C1733577894737.1733577894972 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:27:28,615 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,43699,1733577893481/c7c455b68129%2C43699%2C1733577893481.meta.1733577894498.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:27:28,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34335 {}] regionserver.HRegion(8855): Flush requested on 82e88219ec4ec4a4fff2a6f409a36e45 2024-12-07T13:27:28,913 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 82e88219ec4ec4a4fff2a6f409a36e45 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-07T13:27:28,919 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/82e88219ec4ec4a4fff2a6f409a36e45/.tmp/info/a690da9d0dba416c808ff46226de3cc9 is 1080, key is row0028/info:/1733578046895/Put/seqid=0 2024-12-07T13:27:28,926 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32921 is added to blk_1073741839_1015 (size=12509) 2024-12-07T13:27:28,927 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34771 is added to blk_1073741839_1015 (size=12509) 2024-12-07T13:27:28,927 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=44 (bloomFilter=true), to=hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/82e88219ec4ec4a4fff2a6f409a36e45/.tmp/info/a690da9d0dba416c808ff46226de3cc9 2024-12-07T13:27:28,933 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/82e88219ec4ec4a4fff2a6f409a36e45/.tmp/info/a690da9d0dba416c808ff46226de3cc9 as 
hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/82e88219ec4ec4a4fff2a6f409a36e45/info/a690da9d0dba416c808ff46226de3cc9 2024-12-07T13:27:28,938 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/82e88219ec4ec4a4fff2a6f409a36e45/info/a690da9d0dba416c808ff46226de3cc9, entries=7, sequenceid=44, filesize=12.2 K 2024-12-07T13:27:28,939 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=11.56 KB/11836 for 82e88219ec4ec4a4fff2a6f409a36e45 in 27ms, sequenceid=44, compaction requested=true 2024-12-07T13:27:28,939 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 82e88219ec4ec4a4fff2a6f409a36e45: 2024-12-07T13:27:28,939 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=50.3 K, sizeToCheck=16.0 K 2024-12-07T13:27:28,939 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-07T13:27:28,939 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/82e88219ec4ec4a4fff2a6f409a36e45/info/30ab9dbc66f14e8099ec2e9bb78cc10c because midkey is the same as first or last row 2024-12-07T13:27:28,940 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 82e88219ec4ec4a4fff2a6f409a36e45:info, priority=-2147483648, current under compaction store size is 1 2024-12-07T13:27:28,940 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T13:27:28,940 DEBUG [RS:0;c7c455b68129:34335-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-07T13:27:28,941 DEBUG [RS:0;c7c455b68129:34335-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 51548 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-07T13:27:28,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34335 {}] regionserver.HRegion(8855): Flush requested on 82e88219ec4ec4a4fff2a6f409a36e45 2024-12-07T13:27:28,941 DEBUG [RS:0;c7c455b68129:34335-shortCompactions-0 {}] regionserver.HStore(1541): 82e88219ec4ec4a4fff2a6f409a36e45/info is initiating minor compaction (all files) 2024-12-07T13:27:28,941 INFO [RS:0;c7c455b68129:34335-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 82e88219ec4ec4a4fff2a6f409a36e45/info in TestLogRolling-testLogRolling,,1733578036732.82e88219ec4ec4a4fff2a6f409a36e45. 
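The flush and compaction records above keep re-evaluating whether the region should split: the policy logs "Should split because region size is big enough" once sumSize exceeds sizeToCheck, but StoreUtils then logs "cannot split ... because midkey is the same as first or last row", meaning the candidate split point (the midkey of the largest store file) coincides with a boundary key, so splitting there would leave one daughter region empty and the split is skipped. A minimal sketch of that decision, under assumed names (SplitCheckSketch, StoreFileInfo, and shouldSplit are not HBase classes), might look like:

import java.util.Arrays;
import java.util.Comparator;
import java.util.List;

final class SplitCheckSketch {
  /** Size and key-range summary of one store file; a stand-in, not an HBase type. */
  record StoreFileInfo(long sizeBytes, byte[] firstKey, byte[] midKey, byte[] lastKey) {}

  static boolean shouldSplit(List<StoreFileInfo> storeFiles, long sizeToCheckBytes) {
    long sumSize = storeFiles.stream().mapToLong(StoreFileInfo::sizeBytes).sum();
    if (sumSize <= sizeToCheckBytes) {
      return false; // "region size is big enough" has not been reached yet
    }
    // Candidate split point: the midkey of the largest store file.
    StoreFileInfo largest = storeFiles.stream()
        .max(Comparator.comparingLong(StoreFileInfo::sizeBytes))
        .orElseThrow();
    boolean midkeyOnBoundary = Arrays.equals(largest.midKey(), largest.firstKey())
        || Arrays.equals(largest.midKey(), largest.lastKey());
    // "cannot split ... because midkey is the same as first or last row":
    // splitting at a boundary key would leave one daughter region empty.
    return !midkeyOnBoundary;
  }
}

The repeated regionsWithCommonTable=1 lines suggest the real policy also counts how many regions of the table are hosted on this server before settling on the size threshold; that refinement is omitted from the sketch.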
2024-12-07T13:27:28,941 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 82e88219ec4ec4a4fff2a6f409a36e45 1/1 column families, dataSize=13.66 KB heapSize=14.88 KB 2024-12-07T13:27:28,941 INFO [RS:0;c7c455b68129:34335-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/82e88219ec4ec4a4fff2a6f409a36e45/info/97735dcba6434a1a8022c16e6b479784, hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/82e88219ec4ec4a4fff2a6f409a36e45/info/30ab9dbc66f14e8099ec2e9bb78cc10c, hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/82e88219ec4ec4a4fff2a6f409a36e45/info/a690da9d0dba416c808ff46226de3cc9] into tmpdir=hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/82e88219ec4ec4a4fff2a6f409a36e45/.tmp, totalSize=50.3 K 2024-12-07T13:27:28,942 DEBUG [RS:0;c7c455b68129:34335-shortCompactions-0 {}] compactions.Compactor(225): Compacting 97735dcba6434a1a8022c16e6b479784, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1733578046839 2024-12-07T13:27:28,942 DEBUG [RS:0;c7c455b68129:34335-shortCompactions-0 {}] compactions.Compactor(225): Compacting 30ab9dbc66f14e8099ec2e9bb78cc10c, keycount=20, bloomtype=ROW, size=25.9 K, encoding=NONE, compression=NONE, seqNum=34, earliestPutTs=1733578046854 2024-12-07T13:27:28,943 DEBUG [RS:0;c7c455b68129:34335-shortCompactions-0 {}] compactions.Compactor(225): Compacting a690da9d0dba416c808ff46226de3cc9, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=44, earliestPutTs=1733578046895 2024-12-07T13:27:28,946 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/82e88219ec4ec4a4fff2a6f409a36e45/.tmp/info/681f579c8fff4ddbad40a42ba47de76b is 1080, key is row0035/info:/1733578048915/Put/seqid=0 2024-12-07T13:27:28,951 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32921 is added to blk_1073741840_1016 (size=18987) 2024-12-07T13:27:28,951 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34771 is added to blk_1073741840_1016 (size=18987) 2024-12-07T13:27:28,951 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.66 KB at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/82e88219ec4ec4a4fff2a6f409a36e45/.tmp/info/681f579c8fff4ddbad40a42ba47de76b 2024-12-07T13:27:28,955 INFO [RS:0;c7c455b68129:34335-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 82e88219ec4ec4a4fff2a6f409a36e45#info#compaction#59 average throughput is 34.89 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-07T13:27:28,956 DEBUG [RS:0;c7c455b68129:34335-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/82e88219ec4ec4a4fff2a6f409a36e45/.tmp/info/7c468289625b49a8a8f1212cd1e67659 is 1080, key is row0001/info:/1733578046839/Put/seqid=0 2024-12-07T13:27:28,958 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/82e88219ec4ec4a4fff2a6f409a36e45/.tmp/info/681f579c8fff4ddbad40a42ba47de76b as hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/82e88219ec4ec4a4fff2a6f409a36e45/info/681f579c8fff4ddbad40a42ba47de76b 2024-12-07T13:27:28,962 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34771 is added to blk_1073741841_1017 (size=41747) 2024-12-07T13:27:28,963 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32921 is added to blk_1073741841_1017 (size=41747) 2024-12-07T13:27:28,965 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/82e88219ec4ec4a4fff2a6f409a36e45/info/681f579c8fff4ddbad40a42ba47de76b, entries=13, sequenceid=60, filesize=18.5 K 2024-12-07T13:27:28,966 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~13.66 KB/13988, heapSize ~14.86 KB/15216, currentSize=10.51 KB/10760 for 82e88219ec4ec4a4fff2a6f409a36e45 in 24ms, sequenceid=60, compaction requested=false 2024-12-07T13:27:28,966 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 82e88219ec4ec4a4fff2a6f409a36e45: 2024-12-07T13:27:28,966 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=68.9 K, sizeToCheck=16.0 K 2024-12-07T13:27:28,966 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-07T13:27:28,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34335 {}] regionserver.HRegion(8855): Flush requested on 82e88219ec4ec4a4fff2a6f409a36e45 2024-12-07T13:27:28,966 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/82e88219ec4ec4a4fff2a6f409a36e45/info/30ab9dbc66f14e8099ec2e9bb78cc10c because midkey is the same as first or last row 2024-12-07T13:27:28,966 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 82e88219ec4ec4a4fff2a6f409a36e45 1/1 column families, dataSize=11.56 KB heapSize=12.63 KB 2024-12-07T13:27:28,969 DEBUG [RS:0;c7c455b68129:34335-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/82e88219ec4ec4a4fff2a6f409a36e45/.tmp/info/7c468289625b49a8a8f1212cd1e67659 as 
hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/82e88219ec4ec4a4fff2a6f409a36e45/info/7c468289625b49a8a8f1212cd1e67659 2024-12-07T13:27:28,970 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/82e88219ec4ec4a4fff2a6f409a36e45/.tmp/info/9f5526a8bade4a20b5b039c5852948d0 is 1080, key is row0048/info:/1733578048942/Put/seqid=0 2024-12-07T13:27:28,977 INFO [RS:0;c7c455b68129:34335-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 82e88219ec4ec4a4fff2a6f409a36e45/info of 82e88219ec4ec4a4fff2a6f409a36e45 into 7c468289625b49a8a8f1212cd1e67659(size=40.8 K), total size for store is 59.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-07T13:27:28,978 DEBUG [RS:0;c7c455b68129:34335-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 82e88219ec4ec4a4fff2a6f409a36e45: 2024-12-07T13:27:28,978 INFO [RS:0;c7c455b68129:34335-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1733578036732.82e88219ec4ec4a4fff2a6f409a36e45., storeName=82e88219ec4ec4a4fff2a6f409a36e45/info, priority=13, startTime=1733578048940; duration=0sec 2024-12-07T13:27:28,978 DEBUG [RS:0;c7c455b68129:34335-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=59.3 K, sizeToCheck=16.0 K 2024-12-07T13:27:28,978 DEBUG [RS:0;c7c455b68129:34335-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-07T13:27:28,978 DEBUG [RS:0;c7c455b68129:34335-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/82e88219ec4ec4a4fff2a6f409a36e45/info/7c468289625b49a8a8f1212cd1e67659 because midkey is the same as first or last row 2024-12-07T13:27:28,978 DEBUG [RS:0;c7c455b68129:34335-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=59.3 K, sizeToCheck=16.0 K 2024-12-07T13:27:28,978 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32921 is added to blk_1073741842_1018 (size=16817) 2024-12-07T13:27:28,978 DEBUG [RS:0;c7c455b68129:34335-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-07T13:27:28,978 DEBUG [RS:0;c7c455b68129:34335-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/82e88219ec4ec4a4fff2a6f409a36e45/info/7c468289625b49a8a8f1212cd1e67659 because midkey is the same as first or last row 2024-12-07T13:27:28,978 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34771 is added to blk_1073741842_1018 (size=16817) 2024-12-07T13:27:28,978 DEBUG [RS:0;c7c455b68129:34335-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=59.3 K, sizeToCheck=16.0 K 2024-12-07T13:27:28,978 DEBUG [RS:0;c7c455b68129:34335-shortCompactions-0 {}] 
regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-07T13:27:28,978 DEBUG [RS:0;c7c455b68129:34335-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/82e88219ec4ec4a4fff2a6f409a36e45/info/7c468289625b49a8a8f1212cd1e67659 because midkey is the same as first or last row 2024-12-07T13:27:28,978 DEBUG [RS:0;c7c455b68129:34335-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T13:27:28,978 DEBUG [RS:0;c7c455b68129:34335-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 82e88219ec4ec4a4fff2a6f409a36e45:info 2024-12-07T13:27:28,979 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.56 KB at sequenceid=74 (bloomFilter=true), to=hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/82e88219ec4ec4a4fff2a6f409a36e45/.tmp/info/9f5526a8bade4a20b5b039c5852948d0 2024-12-07T13:27:28,985 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/82e88219ec4ec4a4fff2a6f409a36e45/.tmp/info/9f5526a8bade4a20b5b039c5852948d0 as hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/82e88219ec4ec4a4fff2a6f409a36e45/info/9f5526a8bade4a20b5b039c5852948d0 2024-12-07T13:27:28,989 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/82e88219ec4ec4a4fff2a6f409a36e45/info/9f5526a8bade4a20b5b039c5852948d0, entries=11, sequenceid=74, filesize=16.4 K 2024-12-07T13:27:28,990 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~11.56 KB/11836, heapSize ~12.61 KB/12912, currentSize=6.30 KB/6456 for 82e88219ec4ec4a4fff2a6f409a36e45 in 24ms, sequenceid=74, compaction requested=true 2024-12-07T13:27:28,990 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 82e88219ec4ec4a4fff2a6f409a36e45: 2024-12-07T13:27:28,991 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=75.7 K, sizeToCheck=16.0 K 2024-12-07T13:27:28,991 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-07T13:27:28,991 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/82e88219ec4ec4a4fff2a6f409a36e45/info/7c468289625b49a8a8f1212cd1e67659 because midkey is the same as first or last row 2024-12-07T13:27:28,991 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 82e88219ec4ec4a4fff2a6f409a36e45:info, priority=-2147483648, current under compaction store size is 1 2024-12-07T13:27:28,991 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), 
splitQueue=0 2024-12-07T13:27:28,991 DEBUG [RS:0;c7c455b68129:34335-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-07T13:27:28,992 DEBUG [RS:0;c7c455b68129:34335-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 77551 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-07T13:27:28,992 DEBUG [RS:0;c7c455b68129:34335-shortCompactions-0 {}] regionserver.HStore(1541): 82e88219ec4ec4a4fff2a6f409a36e45/info is initiating minor compaction (all files) 2024-12-07T13:27:28,992 INFO [RS:0;c7c455b68129:34335-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 82e88219ec4ec4a4fff2a6f409a36e45/info in TestLogRolling-testLogRolling,,1733578036732.82e88219ec4ec4a4fff2a6f409a36e45. 2024-12-07T13:27:28,992 INFO [RS:0;c7c455b68129:34335-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/82e88219ec4ec4a4fff2a6f409a36e45/info/7c468289625b49a8a8f1212cd1e67659, hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/82e88219ec4ec4a4fff2a6f409a36e45/info/681f579c8fff4ddbad40a42ba47de76b, hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/82e88219ec4ec4a4fff2a6f409a36e45/info/9f5526a8bade4a20b5b039c5852948d0] into tmpdir=hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/82e88219ec4ec4a4fff2a6f409a36e45/.tmp, totalSize=75.7 K 2024-12-07T13:27:28,992 DEBUG [RS:0;c7c455b68129:34335-shortCompactions-0 {}] compactions.Compactor(225): Compacting 7c468289625b49a8a8f1212cd1e67659, keycount=34, bloomtype=ROW, size=40.8 K, encoding=NONE, compression=NONE, seqNum=44, earliestPutTs=1733578046839 2024-12-07T13:27:28,993 DEBUG [RS:0;c7c455b68129:34335-shortCompactions-0 {}] compactions.Compactor(225): Compacting 681f579c8fff4ddbad40a42ba47de76b, keycount=13, bloomtype=ROW, size=18.5 K, encoding=NONE, compression=NONE, seqNum=60, earliestPutTs=1733578048915 2024-12-07T13:27:28,993 DEBUG [RS:0;c7c455b68129:34335-shortCompactions-0 {}] compactions.Compactor(225): Compacting 9f5526a8bade4a20b5b039c5852948d0, keycount=11, bloomtype=ROW, size=16.4 K, encoding=NONE, compression=NONE, seqNum=74, earliestPutTs=1733578048942 2024-12-07T13:27:29,002 INFO [RS:0;c7c455b68129:34335-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 82e88219ec4ec4a4fff2a6f409a36e45#info#compaction#61 average throughput is 59.52 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-07T13:27:29,003 DEBUG [RS:0;c7c455b68129:34335-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/82e88219ec4ec4a4fff2a6f409a36e45/.tmp/info/0fb24f63c48c46c88b32c10dc4696f9f is 1080, key is row0001/info:/1733578046839/Put/seqid=0 2024-12-07T13:27:29,007 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32921 is added to blk_1073741843_1019 (size=67766) 2024-12-07T13:27:29,007 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34771 is added to blk_1073741843_1019 (size=67766) 2024-12-07T13:27:29,013 DEBUG [RS:0;c7c455b68129:34335-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/82e88219ec4ec4a4fff2a6f409a36e45/.tmp/info/0fb24f63c48c46c88b32c10dc4696f9f as hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/82e88219ec4ec4a4fff2a6f409a36e45/info/0fb24f63c48c46c88b32c10dc4696f9f 2024-12-07T13:27:29,018 INFO [RS:0;c7c455b68129:34335-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 82e88219ec4ec4a4fff2a6f409a36e45/info of 82e88219ec4ec4a4fff2a6f409a36e45 into 0fb24f63c48c46c88b32c10dc4696f9f(size=66.2 K), total size for store is 66.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-07T13:27:29,018 DEBUG [RS:0;c7c455b68129:34335-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 82e88219ec4ec4a4fff2a6f409a36e45: 2024-12-07T13:27:29,018 INFO [RS:0;c7c455b68129:34335-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1733578036732.82e88219ec4ec4a4fff2a6f409a36e45., storeName=82e88219ec4ec4a4fff2a6f409a36e45/info, priority=13, startTime=1733578048991; duration=0sec 2024-12-07T13:27:29,018 DEBUG [RS:0;c7c455b68129:34335-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=66.2 K, sizeToCheck=16.0 K 2024-12-07T13:27:29,018 DEBUG [RS:0;c7c455b68129:34335-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-07T13:27:29,019 DEBUG [RS:0;c7c455b68129:34335-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/82e88219ec4ec4a4fff2a6f409a36e45/info/0fb24f63c48c46c88b32c10dc4696f9f because midkey is the same as first or last row 2024-12-07T13:27:29,019 DEBUG [RS:0;c7c455b68129:34335-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=66.2 K, sizeToCheck=16.0 K 2024-12-07T13:27:29,019 DEBUG [RS:0;c7c455b68129:34335-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-07T13:27:29,019 DEBUG [RS:0;c7c455b68129:34335-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split 
hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/82e88219ec4ec4a4fff2a6f409a36e45/info/0fb24f63c48c46c88b32c10dc4696f9f because midkey is the same as first or last row 2024-12-07T13:27:29,019 DEBUG [RS:0;c7c455b68129:34335-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=66.2 K, sizeToCheck=16.0 K 2024-12-07T13:27:29,019 DEBUG [RS:0;c7c455b68129:34335-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-07T13:27:29,019 DEBUG [RS:0;c7c455b68129:34335-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/82e88219ec4ec4a4fff2a6f409a36e45/info/0fb24f63c48c46c88b32c10dc4696f9f because midkey is the same as first or last row 2024-12-07T13:27:29,019 DEBUG [RS:0;c7c455b68129:34335-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T13:27:29,019 DEBUG [RS:0;c7c455b68129:34335-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 82e88219ec4ec4a4fff2a6f409a36e45:info 2024-12-07T13:27:29,616 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,43699,1733577893481/c7c455b68129%2C43699%2C1733577893481.meta.1733577894498.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:27:29,616 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,42863,1733577894737/c7c455b68129%2C42863%2C1733577894737.1733577894972 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:27:30,617 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,42863,1733577894737/c7c455b68129%2C42863%2C1733577894737.1733577894972 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:27:30,617 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,43699,1733577893481/c7c455b68129%2C43699%2C1733577893481.meta.1733577894498.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:27:30,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34335 {}] regionserver.HRegion(8855): Flush requested on 82e88219ec4ec4a4fff2a6f409a36e45 2024-12-07T13:27:30,984 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 82e88219ec4ec4a4fff2a6f409a36e45 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-07T13:27:30,992 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/82e88219ec4ec4a4fff2a6f409a36e45/.tmp/info/3f9d250a78ef45adb72e722224585bd2 is 1080, key is row0059/info:/1733578048967/Put/seqid=0 2024-12-07T13:27:30,998 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32921 is added to blk_1073741844_1020 (size=12509) 2024-12-07T13:27:30,998 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34771 is added to blk_1073741844_1020 (size=12509) 2024-12-07T13:27:30,999 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=86 (bloomFilter=true), to=hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/82e88219ec4ec4a4fff2a6f409a36e45/.tmp/info/3f9d250a78ef45adb72e722224585bd2 2024-12-07T13:27:31,005 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/82e88219ec4ec4a4fff2a6f409a36e45/.tmp/info/3f9d250a78ef45adb72e722224585bd2 as hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/82e88219ec4ec4a4fff2a6f409a36e45/info/3f9d250a78ef45adb72e722224585bd2 2024-12-07T13:27:31,011 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/82e88219ec4ec4a4fff2a6f409a36e45/info/3f9d250a78ef45adb72e722224585bd2, entries=7, sequenceid=86, filesize=12.2 K 2024-12-07T13:27:31,012 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=10.51 KB/10760 for 82e88219ec4ec4a4fff2a6f409a36e45 in 28ms, sequenceid=86, compaction requested=false 2024-12-07T13:27:31,012 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 82e88219ec4ec4a4fff2a6f409a36e45: 2024-12-07T13:27:31,012 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=78.4 K, sizeToCheck=16.0 K 2024-12-07T13:27:31,013 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-07T13:27:31,013 DEBUG 
[MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/82e88219ec4ec4a4fff2a6f409a36e45/info/0fb24f63c48c46c88b32c10dc4696f9f because midkey is the same as first or last row 2024-12-07T13:27:31,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34335 {}] regionserver.HRegion(8855): Flush requested on 82e88219ec4ec4a4fff2a6f409a36e45 2024-12-07T13:27:31,014 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 82e88219ec4ec4a4fff2a6f409a36e45 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB 2024-12-07T13:27:31,019 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/82e88219ec4ec4a4fff2a6f409a36e45/.tmp/info/9addf242ba0c4807b0c885c54f6b44fb is 1080, key is row0066/info:/1733578050986/Put/seqid=0 2024-12-07T13:27:31,023 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34771 is added to blk_1073741845_1021 (size=17894) 2024-12-07T13:27:31,024 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32921 is added to blk_1073741845_1021 (size=17894) 2024-12-07T13:27:31,024 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=101 (bloomFilter=true), to=hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/82e88219ec4ec4a4fff2a6f409a36e45/.tmp/info/9addf242ba0c4807b0c885c54f6b44fb 2024-12-07T13:27:31,030 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/82e88219ec4ec4a4fff2a6f409a36e45/.tmp/info/9addf242ba0c4807b0c885c54f6b44fb as hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/82e88219ec4ec4a4fff2a6f409a36e45/info/9addf242ba0c4807b0c885c54f6b44fb 2024-12-07T13:27:31,036 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/82e88219ec4ec4a4fff2a6f409a36e45/info/9addf242ba0c4807b0c885c54f6b44fb, entries=12, sequenceid=101, filesize=17.5 K 2024-12-07T13:27:31,037 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=10.51 KB/10760 for 82e88219ec4ec4a4fff2a6f409a36e45 in 23ms, sequenceid=101, compaction requested=true 2024-12-07T13:27:31,037 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 82e88219ec4ec4a4fff2a6f409a36e45: 2024-12-07T13:27:31,037 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=95.9 K, sizeToCheck=16.0 K 2024-12-07T13:27:31,037 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-07T13:27:31,037 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split 
hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/82e88219ec4ec4a4fff2a6f409a36e45/info/0fb24f63c48c46c88b32c10dc4696f9f because midkey is the same as first or last row 2024-12-07T13:27:31,037 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 82e88219ec4ec4a4fff2a6f409a36e45:info, priority=-2147483648, current under compaction store size is 1 2024-12-07T13:27:31,037 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T13:27:31,037 DEBUG [RS:0;c7c455b68129:34335-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-07T13:27:31,038 DEBUG [RS:0;c7c455b68129:34335-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 98169 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-07T13:27:31,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34335 {}] regionserver.HRegion(8855): Flush requested on 82e88219ec4ec4a4fff2a6f409a36e45 2024-12-07T13:27:31,038 DEBUG [RS:0;c7c455b68129:34335-shortCompactions-0 {}] regionserver.HStore(1541): 82e88219ec4ec4a4fff2a6f409a36e45/info is initiating minor compaction (all files) 2024-12-07T13:27:31,038 INFO [RS:0;c7c455b68129:34335-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 82e88219ec4ec4a4fff2a6f409a36e45/info in TestLogRolling-testLogRolling,,1733578036732.82e88219ec4ec4a4fff2a6f409a36e45. 2024-12-07T13:27:31,038 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 82e88219ec4ec4a4fff2a6f409a36e45 1/1 column families, dataSize=11.56 KB heapSize=12.63 KB 2024-12-07T13:27:31,038 INFO [RS:0;c7c455b68129:34335-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/82e88219ec4ec4a4fff2a6f409a36e45/info/0fb24f63c48c46c88b32c10dc4696f9f, hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/82e88219ec4ec4a4fff2a6f409a36e45/info/3f9d250a78ef45adb72e722224585bd2, hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/82e88219ec4ec4a4fff2a6f409a36e45/info/9addf242ba0c4807b0c885c54f6b44fb] into tmpdir=hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/82e88219ec4ec4a4fff2a6f409a36e45/.tmp, totalSize=95.9 K 2024-12-07T13:27:31,039 DEBUG [RS:0;c7c455b68129:34335-shortCompactions-0 {}] compactions.Compactor(225): Compacting 0fb24f63c48c46c88b32c10dc4696f9f, keycount=58, bloomtype=ROW, size=66.2 K, encoding=NONE, compression=NONE, seqNum=74, earliestPutTs=1733578046839 2024-12-07T13:27:31,039 DEBUG [RS:0;c7c455b68129:34335-shortCompactions-0 {}] compactions.Compactor(225): Compacting 3f9d250a78ef45adb72e722224585bd2, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=86, earliestPutTs=1733578048967 2024-12-07T13:27:31,039 DEBUG [RS:0;c7c455b68129:34335-shortCompactions-0 {}] compactions.Compactor(225): Compacting 9addf242ba0c4807b0c885c54f6b44fb, keycount=12, bloomtype=ROW, size=17.5 
K, encoding=NONE, compression=NONE, seqNum=101, earliestPutTs=1733578050986 2024-12-07T13:27:31,042 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/82e88219ec4ec4a4fff2a6f409a36e45/.tmp/info/8c0a29dee7dc43a1acc0d0438647509e is 1080, key is row0078/info:/1733578051016/Put/seqid=0 2024-12-07T13:27:31,062 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34771 is added to blk_1073741846_1022 (size=16817) 2024-12-07T13:27:31,062 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32921 is added to blk_1073741846_1022 (size=16817) 2024-12-07T13:27:31,063 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.56 KB at sequenceid=115 (bloomFilter=true), to=hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/82e88219ec4ec4a4fff2a6f409a36e45/.tmp/info/8c0a29dee7dc43a1acc0d0438647509e 2024-12-07T13:27:31,072 INFO [RS:0;c7c455b68129:34335-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 82e88219ec4ec4a4fff2a6f409a36e45#info#compaction#65 average throughput is 39.51 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-07T13:27:31,072 DEBUG [RS:0;c7c455b68129:34335-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/82e88219ec4ec4a4fff2a6f409a36e45/.tmp/info/085f4f8c7f6347d48dc94f1ef2666f12 is 1080, key is row0001/info:/1733578046839/Put/seqid=0 2024-12-07T13:27:31,075 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/82e88219ec4ec4a4fff2a6f409a36e45/.tmp/info/8c0a29dee7dc43a1acc0d0438647509e as hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/82e88219ec4ec4a4fff2a6f409a36e45/info/8c0a29dee7dc43a1acc0d0438647509e 2024-12-07T13:27:31,076 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32921 is added to blk_1073741847_1023 (size=88408) 2024-12-07T13:27:31,076 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34771 is added to blk_1073741847_1023 (size=88408) 2024-12-07T13:27:31,080 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/82e88219ec4ec4a4fff2a6f409a36e45/info/8c0a29dee7dc43a1acc0d0438647509e, entries=11, sequenceid=115, filesize=16.4 K 2024-12-07T13:27:31,081 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~11.56 KB/11836, heapSize ~12.61 KB/12912, currentSize=8.41 KB/8608 for 82e88219ec4ec4a4fff2a6f409a36e45 in 43ms, sequenceid=115, compaction requested=false 2024-12-07T13:27:31,081 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 82e88219ec4ec4a4fff2a6f409a36e45: 2024-12-07T13:27:31,081 DEBUG [MemStoreFlusher.0 {}] 
regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=112.3 K, sizeToCheck=16.0 K 2024-12-07T13:27:31,081 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-07T13:27:31,081 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/82e88219ec4ec4a4fff2a6f409a36e45/info/0fb24f63c48c46c88b32c10dc4696f9f because midkey is the same as first or last row 2024-12-07T13:27:31,083 DEBUG [RS:0;c7c455b68129:34335-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/82e88219ec4ec4a4fff2a6f409a36e45/.tmp/info/085f4f8c7f6347d48dc94f1ef2666f12 as hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/82e88219ec4ec4a4fff2a6f409a36e45/info/085f4f8c7f6347d48dc94f1ef2666f12 2024-12-07T13:27:31,089 INFO [RS:0;c7c455b68129:34335-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 82e88219ec4ec4a4fff2a6f409a36e45/info of 82e88219ec4ec4a4fff2a6f409a36e45 into 085f4f8c7f6347d48dc94f1ef2666f12(size=86.3 K), total size for store is 102.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-07T13:27:31,089 DEBUG [RS:0;c7c455b68129:34335-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 82e88219ec4ec4a4fff2a6f409a36e45: 2024-12-07T13:27:31,089 INFO [RS:0;c7c455b68129:34335-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1733578036732.82e88219ec4ec4a4fff2a6f409a36e45., storeName=82e88219ec4ec4a4fff2a6f409a36e45/info, priority=13, startTime=1733578051037; duration=0sec 2024-12-07T13:27:31,089 DEBUG [RS:0;c7c455b68129:34335-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=102.8 K, sizeToCheck=16.0 K 2024-12-07T13:27:31,089 DEBUG [RS:0;c7c455b68129:34335-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-07T13:27:31,089 DEBUG [RS:0;c7c455b68129:34335-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=102.8 K, sizeToCheck=16.0 K 2024-12-07T13:27:31,089 DEBUG [RS:0;c7c455b68129:34335-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-07T13:27:31,090 DEBUG [RS:0;c7c455b68129:34335-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=102.8 K, sizeToCheck=16.0 K 2024-12-07T13:27:31,090 DEBUG [RS:0;c7c455b68129:34335-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-07T13:27:31,091 DEBUG [RS:0;c7c455b68129:34335-shortCompactions-0 {}] regionserver.CompactSplit(239): Splitting TestLogRolling-testLogRolling,,1733578036732.82e88219ec4ec4a4fff2a6f409a36e45., compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T13:27:31,091 DEBUG [RS:0;c7c455b68129:34335-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status 
compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T13:27:31,091 DEBUG [RS:0;c7c455b68129:34335-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 82e88219ec4ec4a4fff2a6f409a36e45:info 2024-12-07T13:27:31,092 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38977 {}] assignment.AssignmentManager(1363): Split request from c7c455b68129,34335,1733578035593, parent={ENCODED => 82e88219ec4ec4a4fff2a6f409a36e45, NAME => 'TestLogRolling-testLogRolling,,1733578036732.82e88219ec4ec4a4fff2a6f409a36e45.', STARTKEY => '', ENDKEY => ''}, splitKey=row0062 2024-12-07T13:27:31,096 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38977 {}] assignment.SplitTableRegionProcedure(223): Splittable=true state=OPEN, location=c7c455b68129,34335,1733578035593 2024-12-07T13:27:31,100 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38977 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=82e88219ec4ec4a4fff2a6f409a36e45, daughterA=85a851f2688c52b461cdf08f415e962c, daughterB=977deab04a51d2e8e101e5c7f7816b2b 2024-12-07T13:27:31,102 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=82e88219ec4ec4a4fff2a6f409a36e45, daughterA=85a851f2688c52b461cdf08f415e962c, daughterB=977deab04a51d2e8e101e5c7f7816b2b 2024-12-07T13:27:31,102 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=82e88219ec4ec4a4fff2a6f409a36e45, daughterA=85a851f2688c52b461cdf08f415e962c, daughterB=977deab04a51d2e8e101e5c7f7816b2b 2024-12-07T13:27:31,102 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=82e88219ec4ec4a4fff2a6f409a36e45, daughterA=85a851f2688c52b461cdf08f415e962c, daughterB=977deab04a51d2e8e101e5c7f7816b2b 2024-12-07T13:27:31,109 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=82e88219ec4ec4a4fff2a6f409a36e45, UNASSIGN}] 2024-12-07T13:27:31,110 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=82e88219ec4ec4a4fff2a6f409a36e45, UNASSIGN 2024-12-07T13:27:31,112 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=8 updating hbase:meta row=82e88219ec4ec4a4fff2a6f409a36e45, regionState=CLOSING, regionLocation=c7c455b68129,34335,1733578035593 2024-12-07T13:27:31,114 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=82e88219ec4ec4a4fff2a6f409a36e45, UNASSIGN because future has completed 2024-12-07T13:27:31,115 DEBUG [PEWorker-3 {}] 
assignment.TransitRegionStateProcedure(375): Close region: isSplit: true: evictOnSplit: true: evictOnClose: false 2024-12-07T13:27:31,115 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=9, ppid=8, state=RUNNABLE, hasLock=false; CloseRegionProcedure 82e88219ec4ec4a4fff2a6f409a36e45, server=c7c455b68129,34335,1733578035593}] 2024-12-07T13:27:31,273 INFO [RS_CLOSE_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] handler.UnassignRegionHandler(122): Close 82e88219ec4ec4a4fff2a6f409a36e45 2024-12-07T13:27:31,273 DEBUG [RS_CLOSE_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] handler.UnassignRegionHandler(136): Unassign region: split region: true: evictCache: true 2024-12-07T13:27:31,273 DEBUG [RS_CLOSE_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1722): Closing 82e88219ec4ec4a4fff2a6f409a36e45, disabling compactions & flushes 2024-12-07T13:27:31,274 INFO [RS_CLOSE_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1733578036732.82e88219ec4ec4a4fff2a6f409a36e45. 2024-12-07T13:27:31,274 DEBUG [RS_CLOSE_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1733578036732.82e88219ec4ec4a4fff2a6f409a36e45. 2024-12-07T13:27:31,274 DEBUG [RS_CLOSE_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1733578036732.82e88219ec4ec4a4fff2a6f409a36e45. after waiting 0 ms 2024-12-07T13:27:31,274 DEBUG [RS_CLOSE_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1733578036732.82e88219ec4ec4a4fff2a6f409a36e45. 
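The DEBUG entries above repeatedly pair two checks before the split request reaches the master: ConstantSizeRegionSplitPolicy reports the region as split-eligible once sumSize exceeds sizeToCheck (16.0 K in this test run), while StoreUtils refuses to pick a split point whenever the largest store file's midkey equals its first or last row. Below is a minimal, self-contained sketch of that decision shape only; it is not the HBase implementation, and the StoreFile record, its fields, and the sample row values are illustrative assumptions rather than real metadata.

import java.util.Arrays;
import java.util.Comparator;
import java.util.List;

// SplitCheckSketch: toy model of the two split checks visible in the log above.
public class SplitCheckSketch {

    // Hypothetical per-file summary; field names are ours, not HBase's.
    record StoreFile(String name, long sizeBytes, String firstRow, String lastRow, String midRow) {}

    // "Should split because region size is big enough sumSize=..., sizeToCheck=..."
    static boolean sizeBigEnough(List<StoreFile> files, long sizeToCheck) {
        long sumSize = files.stream().mapToLong(StoreFile::sizeBytes).sum();
        return sumSize > sizeToCheck;
    }

    // "cannot split ... because midkey is the same as first or last row":
    // the split point comes from the largest file's midkey, and a midkey equal
    // to the first or last row cannot yield two non-empty daughters.
    static String chooseSplitPoint(List<StoreFile> files) {
        StoreFile largest = files.stream()
                .max(Comparator.comparingLong(StoreFile::sizeBytes))
                .orElseThrow();
        if (largest.midRow().equals(largest.firstRow())
                || largest.midRow().equals(largest.lastRow())) {
            return null; // no usable split point yet
        }
        return largest.midRow();
    }

    public static void main(String[] args) {
        // Illustrative numbers loosely based on the entries above (sizes in bytes).
        List<StoreFile> store = Arrays.asList(
                new StoreFile("0fb24f63...", 67_766L, "row0001", "row0065", "row0001"),
                new StoreFile("3f9d250a...", 12_509L, "row0059", "row0065", "row0062"));
        System.out.println("size big enough: " + sizeBigEnough(store, 16 * 1024)); // true
        System.out.println("split point:     " + chooseSplitPoint(store));         // null
    }
}

With these sample values the size check passes but no split point is chosen, which mirrors the state the log stays in until the split request with splitKey=row0062 finally goes through.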
2024-12-07T13:27:31,274 INFO [RS_CLOSE_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(2902): Flushing 82e88219ec4ec4a4fff2a6f409a36e45 1/1 column families, dataSize=8.41 KB heapSize=9.25 KB 2024-12-07T13:27:31,280 DEBUG [RS_CLOSE_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/82e88219ec4ec4a4fff2a6f409a36e45/.tmp/info/b01896581d774266acdaca83c5712e72 is 1080, key is row0089/info:/1733578051039/Put/seqid=0 2024-12-07T13:27:31,286 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32921 is added to blk_1073741848_1024 (size=13586) 2024-12-07T13:27:31,286 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34771 is added to blk_1073741848_1024 (size=13586) 2024-12-07T13:27:31,286 INFO [RS_CLOSE_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.41 KB at sequenceid=127 (bloomFilter=true), to=hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/82e88219ec4ec4a4fff2a6f409a36e45/.tmp/info/b01896581d774266acdaca83c5712e72 2024-12-07T13:27:31,293 DEBUG [RS_CLOSE_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/82e88219ec4ec4a4fff2a6f409a36e45/.tmp/info/b01896581d774266acdaca83c5712e72 as hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/82e88219ec4ec4a4fff2a6f409a36e45/info/b01896581d774266acdaca83c5712e72 2024-12-07T13:27:31,300 INFO [RS_CLOSE_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/82e88219ec4ec4a4fff2a6f409a36e45/info/b01896581d774266acdaca83c5712e72, entries=8, sequenceid=127, filesize=13.3 K 2024-12-07T13:27:31,301 INFO [RS_CLOSE_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(3140): Finished flush of dataSize ~8.41 KB/8608, heapSize ~9.23 KB/9456, currentSize=0 B/0 for 82e88219ec4ec4a4fff2a6f409a36e45 in 27ms, sequenceid=127, compaction requested=true 2024-12-07T13:27:31,303 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733578036732.82e88219ec4ec4a4fff2a6f409a36e45.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/82e88219ec4ec4a4fff2a6f409a36e45/info/97735dcba6434a1a8022c16e6b479784, hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/82e88219ec4ec4a4fff2a6f409a36e45/info/30ab9dbc66f14e8099ec2e9bb78cc10c, hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/82e88219ec4ec4a4fff2a6f409a36e45/info/7c468289625b49a8a8f1212cd1e67659, 
hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/82e88219ec4ec4a4fff2a6f409a36e45/info/a690da9d0dba416c808ff46226de3cc9, hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/82e88219ec4ec4a4fff2a6f409a36e45/info/681f579c8fff4ddbad40a42ba47de76b, hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/82e88219ec4ec4a4fff2a6f409a36e45/info/0fb24f63c48c46c88b32c10dc4696f9f, hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/82e88219ec4ec4a4fff2a6f409a36e45/info/9f5526a8bade4a20b5b039c5852948d0, hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/82e88219ec4ec4a4fff2a6f409a36e45/info/3f9d250a78ef45adb72e722224585bd2, hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/82e88219ec4ec4a4fff2a6f409a36e45/info/9addf242ba0c4807b0c885c54f6b44fb] to archive 2024-12-07T13:27:31,304 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733578036732.82e88219ec4ec4a4fff2a6f409a36e45.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-12-07T13:27:31,307 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733578036732.82e88219ec4ec4a4fff2a6f409a36e45.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/82e88219ec4ec4a4fff2a6f409a36e45/info/97735dcba6434a1a8022c16e6b479784 to hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/archive/data/default/TestLogRolling-testLogRolling/82e88219ec4ec4a4fff2a6f409a36e45/info/97735dcba6434a1a8022c16e6b479784 2024-12-07T13:27:31,308 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733578036732.82e88219ec4ec4a4fff2a6f409a36e45.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/82e88219ec4ec4a4fff2a6f409a36e45/info/30ab9dbc66f14e8099ec2e9bb78cc10c to hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/archive/data/default/TestLogRolling-testLogRolling/82e88219ec4ec4a4fff2a6f409a36e45/info/30ab9dbc66f14e8099ec2e9bb78cc10c 2024-12-07T13:27:31,309 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733578036732.82e88219ec4ec4a4fff2a6f409a36e45.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/82e88219ec4ec4a4fff2a6f409a36e45/info/7c468289625b49a8a8f1212cd1e67659 to hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/archive/data/default/TestLogRolling-testLogRolling/82e88219ec4ec4a4fff2a6f409a36e45/info/7c468289625b49a8a8f1212cd1e67659 2024-12-07T13:27:31,311 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733578036732.82e88219ec4ec4a4fff2a6f409a36e45.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/82e88219ec4ec4a4fff2a6f409a36e45/info/a690da9d0dba416c808ff46226de3cc9 to 
hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/archive/data/default/TestLogRolling-testLogRolling/82e88219ec4ec4a4fff2a6f409a36e45/info/a690da9d0dba416c808ff46226de3cc9 2024-12-07T13:27:31,312 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733578036732.82e88219ec4ec4a4fff2a6f409a36e45.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/82e88219ec4ec4a4fff2a6f409a36e45/info/681f579c8fff4ddbad40a42ba47de76b to hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/archive/data/default/TestLogRolling-testLogRolling/82e88219ec4ec4a4fff2a6f409a36e45/info/681f579c8fff4ddbad40a42ba47de76b 2024-12-07T13:27:31,314 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733578036732.82e88219ec4ec4a4fff2a6f409a36e45.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/82e88219ec4ec4a4fff2a6f409a36e45/info/0fb24f63c48c46c88b32c10dc4696f9f to hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/archive/data/default/TestLogRolling-testLogRolling/82e88219ec4ec4a4fff2a6f409a36e45/info/0fb24f63c48c46c88b32c10dc4696f9f 2024-12-07T13:27:31,315 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733578036732.82e88219ec4ec4a4fff2a6f409a36e45.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/82e88219ec4ec4a4fff2a6f409a36e45/info/9f5526a8bade4a20b5b039c5852948d0 to hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/archive/data/default/TestLogRolling-testLogRolling/82e88219ec4ec4a4fff2a6f409a36e45/info/9f5526a8bade4a20b5b039c5852948d0 2024-12-07T13:27:31,317 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733578036732.82e88219ec4ec4a4fff2a6f409a36e45.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/82e88219ec4ec4a4fff2a6f409a36e45/info/3f9d250a78ef45adb72e722224585bd2 to hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/archive/data/default/TestLogRolling-testLogRolling/82e88219ec4ec4a4fff2a6f409a36e45/info/3f9d250a78ef45adb72e722224585bd2 2024-12-07T13:27:31,319 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733578036732.82e88219ec4ec4a4fff2a6f409a36e45.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/82e88219ec4ec4a4fff2a6f409a36e45/info/9addf242ba0c4807b0c885c54f6b44fb to hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/archive/data/default/TestLogRolling-testLogRolling/82e88219ec4ec4a4fff2a6f409a36e45/info/9addf242ba0c4807b0c885c54f6b44fb 2024-12-07T13:27:31,326 DEBUG [RS_CLOSE_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/82e88219ec4ec4a4fff2a6f409a36e45/recovered.edits/130.seqid, newMaxSeqId=130, maxSeqId=1 
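The StoreCloser / HFileArchiver entries above move every compacted store file out of the region's data directory into the mirrored location under archive/ before the parent region is closed for the split. The sketch below reproduces only that path rewrite, using one file name taken from this log; the real backup.HFileArchiver additionally performs the move on HDFS and handles name collisions, which is not modelled here.

// ArchivePathSketch: derives the archive destination shown in the
// "Archived from FileableStoreFile, <data path> to <archive path>" entries.
public class ArchivePathSketch {

    // rootDir is the cluster root directory; the archive tree mirrors the
    // data tree one level below an "archive" prefix.
    static String archiveLocation(String rootDir, String storeFilePath) {
        // strip "<rootDir>/" and re-root the relative path under "<rootDir>/archive/"
        String relative = storeFilePath.substring(rootDir.length() + 1);
        return rootDir + "/archive/" + relative;
    }

    public static void main(String[] args) {
        String root = "hdfs://localhost:37523/user/jenkins/test-data/"
                + "53ec9f30-9c83-5d01-c329-55bb2868355f";
        String src = root + "/data/default/TestLogRolling-testLogRolling/"
                + "82e88219ec4ec4a4fff2a6f409a36e45/info/7c468289625b49a8a8f1212cd1e67659";
        // Prints the same destination the log reports for this file.
        System.out.println(archiveLocation(root, src));
    }
}

The printed path matches the archive destination logged above for 7c468289625b49a8a8f1212cd1e67659.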
2024-12-07T13:27:31,327 INFO [RS_CLOSE_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1733578036732.82e88219ec4ec4a4fff2a6f409a36e45. 2024-12-07T13:27:31,327 DEBUG [RS_CLOSE_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1676): Region close journal for 82e88219ec4ec4a4fff2a6f409a36e45: Waiting for close lock at 1733578051273Running coprocessor pre-close hooks at 1733578051273Disabling compacts and flushes for region at 1733578051273Disabling writes for close at 1733578051274 (+1 ms)Obtaining lock to block concurrent updates at 1733578051274Preparing flush snapshotting stores in 82e88219ec4ec4a4fff2a6f409a36e45 at 1733578051274Finished memstore snapshotting TestLogRolling-testLogRolling,,1733578036732.82e88219ec4ec4a4fff2a6f409a36e45., syncing WAL and waiting on mvcc, flushsize=dataSize=8608, getHeapSize=9456, getOffHeapSize=0, getCellsCount=8 at 1733578051274Flushing stores of TestLogRolling-testLogRolling,,1733578036732.82e88219ec4ec4a4fff2a6f409a36e45. at 1733578051275 (+1 ms)Flushing 82e88219ec4ec4a4fff2a6f409a36e45/info: creating writer at 1733578051275Flushing 82e88219ec4ec4a4fff2a6f409a36e45/info: appending metadata at 1733578051279 (+4 ms)Flushing 82e88219ec4ec4a4fff2a6f409a36e45/info: closing flushed file at 1733578051279Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@dd82ebf: reopening flushed file at 1733578051292 (+13 ms)Finished flush of dataSize ~8.41 KB/8608, heapSize ~9.23 KB/9456, currentSize=0 B/0 for 82e88219ec4ec4a4fff2a6f409a36e45 in 27ms, sequenceid=127, compaction requested=true at 1733578051301 (+9 ms)Writing region close event to WAL at 1733578051322 (+21 ms)Running coprocessor post-close hooks at 1733578051327 (+5 ms)Closed at 1733578051327 2024-12-07T13:27:31,330 INFO [RS_CLOSE_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] handler.UnassignRegionHandler(157): Closed 82e88219ec4ec4a4fff2a6f409a36e45 2024-12-07T13:27:31,331 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=8 updating hbase:meta row=82e88219ec4ec4a4fff2a6f409a36e45, regionState=CLOSED 2024-12-07T13:27:31,333 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=9, ppid=8, state=RUNNABLE, hasLock=false; CloseRegionProcedure 82e88219ec4ec4a4fff2a6f409a36e45, server=c7c455b68129,34335,1733578035593 because future has completed 2024-12-07T13:27:31,338 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=9, resume processing ppid=8 2024-12-07T13:27:31,338 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, ppid=8, state=SUCCESS, hasLock=false; CloseRegionProcedure 82e88219ec4ec4a4fff2a6f409a36e45, server=c7c455b68129,34335,1733578035593 in 220 msec 2024-12-07T13:27:31,342 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-12-07T13:27:31,342 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=82e88219ec4ec4a4fff2a6f409a36e45, UNASSIGN in 229 msec 2024-12-07T13:27:31,355 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T13:27:31,360 INFO [PEWorker-3 
{}] assignment.SplitTableRegionProcedure(728): pid=7 splitting 3 storefiles, region=82e88219ec4ec4a4fff2a6f409a36e45, threads=3 2024-12-07T13:27:31,362 DEBUG [StoreFileSplitter-pool-1 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/82e88219ec4ec4a4fff2a6f409a36e45/info/8c0a29dee7dc43a1acc0d0438647509e for region: 82e88219ec4ec4a4fff2a6f409a36e45 2024-12-07T13:27:31,362 DEBUG [StoreFileSplitter-pool-2 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/82e88219ec4ec4a4fff2a6f409a36e45/info/b01896581d774266acdaca83c5712e72 for region: 82e88219ec4ec4a4fff2a6f409a36e45 2024-12-07T13:27:31,363 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/82e88219ec4ec4a4fff2a6f409a36e45/info/085f4f8c7f6347d48dc94f1ef2666f12 for region: 82e88219ec4ec4a4fff2a6f409a36e45 2024-12-07T13:27:31,374 DEBUG [StoreFileSplitter-pool-1 {}] regionserver.HRegionFileSystem(650): Will create HFileLink file for hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/82e88219ec4ec4a4fff2a6f409a36e45/info/8c0a29dee7dc43a1acc0d0438647509e, top=true 2024-12-07T13:27:31,374 DEBUG [StoreFileSplitter-pool-2 {}] regionserver.HRegionFileSystem(650): Will create HFileLink file for hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/82e88219ec4ec4a4fff2a6f409a36e45/info/b01896581d774266acdaca83c5712e72, top=true 2024-12-07T13:27:31,383 INFO [StoreFileSplitter-pool-2 {}] regionserver.HRegionFileSystem(691): Created linkFile:hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b/info/TestLogRolling-testLogRolling=82e88219ec4ec4a4fff2a6f409a36e45-b01896581d774266acdaca83c5712e72 for child: 977deab04a51d2e8e101e5c7f7816b2b, parent: 82e88219ec4ec4a4fff2a6f409a36e45 2024-12-07T13:27:31,383 DEBUG [StoreFileSplitter-pool-2 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/82e88219ec4ec4a4fff2a6f409a36e45/info/b01896581d774266acdaca83c5712e72 for region: 82e88219ec4ec4a4fff2a6f409a36e45 2024-12-07T13:27:31,384 INFO [StoreFileSplitter-pool-1 {}] regionserver.HRegionFileSystem(691): Created linkFile:hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b/info/TestLogRolling-testLogRolling=82e88219ec4ec4a4fff2a6f409a36e45-8c0a29dee7dc43a1acc0d0438647509e for child: 977deab04a51d2e8e101e5c7f7816b2b, parent: 82e88219ec4ec4a4fff2a6f409a36e45 2024-12-07T13:27:31,384 DEBUG [StoreFileSplitter-pool-1 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: 
hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/82e88219ec4ec4a4fff2a6f409a36e45/info/8c0a29dee7dc43a1acc0d0438647509e for region: 82e88219ec4ec4a4fff2a6f409a36e45 2024-12-07T13:27:31,386 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32921 is added to blk_1073741849_1025 (size=27) 2024-12-07T13:27:31,387 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34771 is added to blk_1073741849_1025 (size=27) 2024-12-07T13:27:31,396 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34771 is added to blk_1073741850_1026 (size=27) 2024-12-07T13:27:31,396 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32921 is added to blk_1073741850_1026 (size=27) 2024-12-07T13:27:31,397 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/82e88219ec4ec4a4fff2a6f409a36e45/info/085f4f8c7f6347d48dc94f1ef2666f12 for region: 82e88219ec4ec4a4fff2a6f409a36e45 2024-12-07T13:27:31,398 DEBUG [PEWorker-3 {}] assignment.SplitTableRegionProcedure(802): pid=7 split storefiles for region 82e88219ec4ec4a4fff2a6f409a36e45 Daughter A: [hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/85a851f2688c52b461cdf08f415e962c/info/085f4f8c7f6347d48dc94f1ef2666f12.82e88219ec4ec4a4fff2a6f409a36e45] storefiles, Daughter B: [hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b/info/085f4f8c7f6347d48dc94f1ef2666f12.82e88219ec4ec4a4fff2a6f409a36e45, hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b/info/TestLogRolling-testLogRolling=82e88219ec4ec4a4fff2a6f409a36e45-8c0a29dee7dc43a1acc0d0438647509e, hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b/info/TestLogRolling-testLogRolling=82e88219ec4ec4a4fff2a6f409a36e45-b01896581d774266acdaca83c5712e72] storefiles. 
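The entries above show SplitTableRegionProcedure (pid=7) turning the parent's three store files into reference/HFileLink files for the two daughter regions instead of copying any data. As a minimal sketch (not taken from the test code itself), the same kind of split can be requested through the public Admin API; the table name and split key below are copied from this log, while the cluster configuration on the classpath is an assumption:

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.util.Bytes;

    public class RequestSplit {
      public static void main(String[] args) throws IOException {
        Configuration conf = HBaseConfiguration.create();   // reads hbase-site.xml from the classpath
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Ask the master to split the table at an explicit key; the master
          // then drives a SplitTableRegionProcedure like pid=7 above, which
          // creates reference/HFileLink files rather than rewriting store data.
          admin.split(TableName.valueOf("TestLogRolling-testLogRolling"),
                      Bytes.toBytes("row0062"));
        }
      }
    }

The call only submits the request; the master executes the procedure asynchronously, as the pid=7/8/9 entries in this log show.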
2024-12-07T13:27:31,404 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32921 is added to blk_1073741851_1027 (size=71) 2024-12-07T13:27:31,404 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34771 is added to blk_1073741851_1027 (size=71) 2024-12-07T13:27:31,406 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T13:27:31,417 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34771 is added to blk_1073741852_1028 (size=71) 2024-12-07T13:27:31,418 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32921 is added to blk_1073741852_1028 (size=71) 2024-12-07T13:27:31,419 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T13:27:31,430 DEBUG [PEWorker-3 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/85a851f2688c52b461cdf08f415e962c/recovered.edits/130.seqid, newMaxSeqId=130, maxSeqId=-1 2024-12-07T13:27:31,432 DEBUG [PEWorker-3 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b/recovered.edits/130.seqid, newMaxSeqId=130, maxSeqId=-1 2024-12-07T13:27:31,434 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,,1733578036732.82e88219ec4ec4a4fff2a6f409a36e45.","families":{"info":[{"qualifier":"regioninfo","vlen":63,"tag":[],"timestamp":"1733578051434"},{"qualifier":"splitA","vlen":70,"tag":[],"timestamp":"1733578051434"},{"qualifier":"splitB","vlen":70,"tag":[],"timestamp":"1733578051434"}]},"ts":"1733578051434"} 2024-12-07T13:27:31,434 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,,1733578051096.85a851f2688c52b461cdf08f415e962c.","families":{"info":[{"qualifier":"regioninfo","vlen":70,"tag":[],"timestamp":"1733578051434"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733578051434"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1733578051434"}]},"ts":"1733578051434"} 2024-12-07T13:27:31,434 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,row0062,1733578051096.977deab04a51d2e8e101e5c7f7816b2b.","families":{"info":[{"qualifier":"regioninfo","vlen":70,"tag":[],"timestamp":"1733578051434"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733578051434"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1733578051434"}]},"ts":"1733578051434"} 2024-12-07T13:27:31,449 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=85a851f2688c52b461cdf08f415e962c, ASSIGN}, {pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, 
region=977deab04a51d2e8e101e5c7f7816b2b, ASSIGN}] 2024-12-07T13:27:31,450 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=977deab04a51d2e8e101e5c7f7816b2b, ASSIGN 2024-12-07T13:27:31,450 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=85a851f2688c52b461cdf08f415e962c, ASSIGN 2024-12-07T13:27:31,451 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=977deab04a51d2e8e101e5c7f7816b2b, ASSIGN; state=SPLITTING_NEW, location=c7c455b68129,34335,1733578035593; forceNewPlan=false, retain=false 2024-12-07T13:27:31,451 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=85a851f2688c52b461cdf08f415e962c, ASSIGN; state=SPLITTING_NEW, location=c7c455b68129,34335,1733578035593; forceNewPlan=false, retain=false 2024-12-07T13:27:31,602 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=10 updating hbase:meta row=85a851f2688c52b461cdf08f415e962c, regionState=OPENING, regionLocation=c7c455b68129,34335,1733578035593 2024-12-07T13:27:31,602 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=11 updating hbase:meta row=977deab04a51d2e8e101e5c7f7816b2b, regionState=OPENING, regionLocation=c7c455b68129,34335,1733578035593 2024-12-07T13:27:31,608 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=85a851f2688c52b461cdf08f415e962c, ASSIGN because future has completed 2024-12-07T13:27:31,609 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=12, ppid=10, state=RUNNABLE, hasLock=false; OpenRegionProcedure 85a851f2688c52b461cdf08f415e962c, server=c7c455b68129,34335,1733578035593}] 2024-12-07T13:27:31,610 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=977deab04a51d2e8e101e5c7f7816b2b, ASSIGN because future has completed 2024-12-07T13:27:31,611 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=13, ppid=11, state=RUNNABLE, hasLock=false; OpenRegionProcedure 977deab04a51d2e8e101e5c7f7816b2b, server=c7c455b68129,34335,1733578035593}] 2024-12-07T13:27:31,618 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,43699,1733577893481/c7c455b68129%2C43699%2C1733577893481.meta.1733577894498.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:27:31,618 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,42863,1733577894737/c7c455b68129%2C42863%2C1733577894737.1733577894972 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:27:31,771 INFO [RS_OPEN_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,row0062,1733578051096.977deab04a51d2e8e101e5c7f7816b2b. 2024-12-07T13:27:31,772 DEBUG [RS_OPEN_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7752): Opening region: {ENCODED => 977deab04a51d2e8e101e5c7f7816b2b, NAME => 'TestLogRolling-testLogRolling,row0062,1733578051096.977deab04a51d2e8e101e5c7f7816b2b.', STARTKEY => 'row0062', ENDKEY => ''} 2024-12-07T13:27:31,772 DEBUG [RS_OPEN_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling 977deab04a51d2e8e101e5c7f7816b2b 2024-12-07T13:27:31,772 DEBUG [RS_OPEN_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,row0062,1733578051096.977deab04a51d2e8e101e5c7f7816b2b.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T13:27:31,772 DEBUG [RS_OPEN_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7794): checking encryption for 977deab04a51d2e8e101e5c7f7816b2b 2024-12-07T13:27:31,772 DEBUG [RS_OPEN_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7797): checking classloading for 977deab04a51d2e8e101e5c7f7816b2b 2024-12-07T13:27:31,774 INFO [StoreOpener-977deab04a51d2e8e101e5c7f7816b2b-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 977deab04a51d2e8e101e5c7f7816b2b 2024-12-07T13:27:31,776 INFO [StoreOpener-977deab04a51d2e8e101e5c7f7816b2b-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 977deab04a51d2e8e101e5c7f7816b2b columnFamilyName info 2024-12-07T13:27:31,776 DEBUG [StoreOpener-977deab04a51d2e8e101e5c7f7816b2b-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T13:27:31,791 DEBUG [StoreOpener-977deab04a51d2e8e101e5c7f7816b2b-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b/info/085f4f8c7f6347d48dc94f1ef2666f12.82e88219ec4ec4a4fff2a6f409a36e45->hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/82e88219ec4ec4a4fff2a6f409a36e45/info/085f4f8c7f6347d48dc94f1ef2666f12-top 2024-12-07T13:27:31,796 DEBUG [StoreOpener-977deab04a51d2e8e101e5c7f7816b2b-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b/info/TestLogRolling-testLogRolling=82e88219ec4ec4a4fff2a6f409a36e45-8c0a29dee7dc43a1acc0d0438647509e 2024-12-07T13:27:31,801 DEBUG [StoreOpener-977deab04a51d2e8e101e5c7f7816b2b-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b/info/TestLogRolling-testLogRolling=82e88219ec4ec4a4fff2a6f409a36e45-b01896581d774266acdaca83c5712e72 2024-12-07T13:27:31,801 INFO [StoreOpener-977deab04a51d2e8e101e5c7f7816b2b-1 {}] regionserver.HStore(327): Store=977deab04a51d2e8e101e5c7f7816b2b/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T13:27:31,801 DEBUG [RS_OPEN_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1038): replaying wal for 977deab04a51d2e8e101e5c7f7816b2b 2024-12-07T13:27:31,802 DEBUG [RS_OPEN_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b 2024-12-07T13:27:31,803 DEBUG [RS_OPEN_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b 2024-12-07T13:27:31,803 DEBUG [RS_OPEN_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1048): stopping wal replay for 977deab04a51d2e8e101e5c7f7816b2b 2024-12-07T13:27:31,803 DEBUG [RS_OPEN_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1060): Cleaning up temporary data for 977deab04a51d2e8e101e5c7f7816b2b 2024-12-07T13:27:31,805 DEBUG [RS_OPEN_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1093): writing seq id for 977deab04a51d2e8e101e5c7f7816b2b 2024-12-07T13:27:31,806 INFO [RS_OPEN_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1114): Opened 977deab04a51d2e8e101e5c7f7816b2b; next sequenceid=131; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=769619, jitterRate=-0.02137945592403412}}}, 
FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-07T13:27:31,806 DEBUG [RS_OPEN_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 977deab04a51d2e8e101e5c7f7816b2b 2024-12-07T13:27:31,806 DEBUG [RS_OPEN_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1006): Region open journal for 977deab04a51d2e8e101e5c7f7816b2b: Running coprocessor pre-open hook at 1733578051772Writing region info on filesystem at 1733578051772Initializing all the Stores at 1733578051774 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733578051774Cleaning up temporary data from old regions at 1733578051803 (+29 ms)Running coprocessor post-open hooks at 1733578051806 (+3 ms)Region opened successfully at 1733578051806 2024-12-07T13:27:31,807 INFO [RS_OPEN_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,row0062,1733578051096.977deab04a51d2e8e101e5c7f7816b2b., pid=13, masterSystemTime=1733578051764 2024-12-07T13:27:31,807 DEBUG [RS_OPEN_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.CompactSplit(403): Add compact mark for store 977deab04a51d2e8e101e5c7f7816b2b:info, priority=-2147483648, current under compaction store size is 1 2024-12-07T13:27:31,807 DEBUG [RS_OPEN_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: Opening Region; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T13:27:31,807 DEBUG [RS:0;c7c455b68129:34335-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-07T13:27:31,809 INFO [RS:0;c7c455b68129:34335-shortCompactions-0 {}] regionserver.HStore(1527): Keeping/Overriding Compaction request priority to -2147482648 for CF info since it belongs to recently split daughter region TestLogRolling-testLogRolling,row0062,1733578051096.977deab04a51d2e8e101e5c7f7816b2b. 2024-12-07T13:27:31,809 DEBUG [RS:0;c7c455b68129:34335-shortCompactions-0 {}] regionserver.HStore(1541): 977deab04a51d2e8e101e5c7f7816b2b/info is initiating minor compaction (all files) 2024-12-07T13:27:31,809 INFO [RS:0;c7c455b68129:34335-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 977deab04a51d2e8e101e5c7f7816b2b/info in TestLogRolling-testLogRolling,row0062,1733578051096.977deab04a51d2e8e101e5c7f7816b2b. 
2024-12-07T13:27:31,809 INFO [RS:0;c7c455b68129:34335-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b/info/085f4f8c7f6347d48dc94f1ef2666f12.82e88219ec4ec4a4fff2a6f409a36e45->hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/82e88219ec4ec4a4fff2a6f409a36e45/info/085f4f8c7f6347d48dc94f1ef2666f12-top, hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b/info/TestLogRolling-testLogRolling=82e88219ec4ec4a4fff2a6f409a36e45-8c0a29dee7dc43a1acc0d0438647509e, hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b/info/TestLogRolling-testLogRolling=82e88219ec4ec4a4fff2a6f409a36e45-b01896581d774266acdaca83c5712e72] into tmpdir=hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b/.tmp, totalSize=116.0 K 2024-12-07T13:27:31,810 DEBUG [RS:0;c7c455b68129:34335-shortCompactions-0 {}] compactions.Compactor(225): Compacting 085f4f8c7f6347d48dc94f1ef2666f12.82e88219ec4ec4a4fff2a6f409a36e45, keycount=38, bloomtype=ROW, size=86.3 K, encoding=NONE, compression=NONE, seqNum=102, earliestPutTs=1733578046839 2024-12-07T13:27:31,810 DEBUG [RS_OPEN_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,row0062,1733578051096.977deab04a51d2e8e101e5c7f7816b2b. 2024-12-07T13:27:31,810 INFO [RS_OPEN_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,row0062,1733578051096.977deab04a51d2e8e101e5c7f7816b2b. 2024-12-07T13:27:31,810 INFO [RS_OPEN_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,,1733578051096.85a851f2688c52b461cdf08f415e962c. 
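The short-compactions thread above selects all three inherited files of 977deab04a51d2e8e101e5c7f7816b2b/info for a minor compaction because the region is a freshly split daughter. Compactions can also be requested explicitly from a client; the following is a hedged sketch using the public Admin API (table and column family names are copied from the log, everything else is illustrative):

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.util.Bytes;

    public class RequestCompaction {
      public static void main(String[] args) throws IOException {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableName table = TableName.valueOf("TestLogRolling-testLogRolling");
          // Enqueue a compaction of the 'info' family, analogous to the
          // system-requested compaction logged above; the call is asynchronous
          // and only queues the request on the region servers.
          admin.compact(table, Bytes.toBytes("info"));
          // A full rewrite of all store files can be asked for with:
          // admin.majorCompact(table);
        }
      }
    }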
2024-12-07T13:27:31,810 DEBUG [RS:0;c7c455b68129:34335-shortCompactions-0 {}] compactions.Compactor(225): Compacting TestLogRolling-testLogRolling=82e88219ec4ec4a4fff2a6f409a36e45-8c0a29dee7dc43a1acc0d0438647509e, keycount=11, bloomtype=ROW, size=16.4 K, encoding=NONE, compression=NONE, seqNum=115, earliestPutTs=1733578051016 2024-12-07T13:27:31,810 DEBUG [RS_OPEN_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7752): Opening region: {ENCODED => 85a851f2688c52b461cdf08f415e962c, NAME => 'TestLogRolling-testLogRolling,,1733578051096.85a851f2688c52b461cdf08f415e962c.', STARTKEY => '', ENDKEY => 'row0062'} 2024-12-07T13:27:31,810 DEBUG [RS_OPEN_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling 85a851f2688c52b461cdf08f415e962c 2024-12-07T13:27:31,810 DEBUG [RS:0;c7c455b68129:34335-shortCompactions-0 {}] compactions.Compactor(225): Compacting TestLogRolling-testLogRolling=82e88219ec4ec4a4fff2a6f409a36e45-b01896581d774266acdaca83c5712e72, keycount=8, bloomtype=ROW, size=13.3 K, encoding=NONE, compression=NONE, seqNum=127, earliestPutTs=1733578051039 2024-12-07T13:27:31,810 DEBUG [RS_OPEN_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,,1733578051096.85a851f2688c52b461cdf08f415e962c.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T13:27:31,810 DEBUG [RS_OPEN_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7794): checking encryption for 85a851f2688c52b461cdf08f415e962c 2024-12-07T13:27:31,810 DEBUG [RS_OPEN_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7797): checking classloading for 85a851f2688c52b461cdf08f415e962c 2024-12-07T13:27:31,810 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=11 updating hbase:meta row=977deab04a51d2e8e101e5c7f7816b2b, regionState=OPEN, openSeqNum=131, regionLocation=c7c455b68129,34335,1733578035593 2024-12-07T13:27:31,812 INFO [StoreOpener-85a851f2688c52b461cdf08f415e962c-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 85a851f2688c52b461cdf08f415e962c 2024-12-07T13:27:31,813 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34335 {}] regionserver.HRegion(8855): Flush requested on 1588230740 2024-12-07T13:27:31,813 DEBUG [MemStoreFlusher.0 {}] regionserver.FlushAllLargeStoresPolicy(69): Since none of the CFs were above the size, flushing all. 
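The MemStoreFlusher entries that follow flush hbase:meta (region 1588230740) after the split updated its rows; FlushAllLargeStoresPolicy decides to flush all four families because none individually is above the size threshold. For reference, the same kind of flush can be requested from a client; a minimal sketch, assuming a reachable cluster configuration:

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushMeta {
      public static void main(String[] args) throws IOException {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Flush all column families of hbase:meta, the same kind of flush
          // MemStoreFlusher performs in the entries below.
          admin.flush(TableName.META_TABLE_NAME);
        }
      }
    }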
2024-12-07T13:27:31,813 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=5.15 KB heapSize=9 KB 2024-12-07T13:27:31,813 INFO [StoreOpener-85a851f2688c52b461cdf08f415e962c-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 85a851f2688c52b461cdf08f415e962c columnFamilyName info 2024-12-07T13:27:31,813 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=13, ppid=11, state=RUNNABLE, hasLock=false; OpenRegionProcedure 977deab04a51d2e8e101e5c7f7816b2b, server=c7c455b68129,34335,1733578035593 because future has completed 2024-12-07T13:27:31,813 DEBUG [StoreOpener-85a851f2688c52b461cdf08f415e962c-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T13:27:31,834 WARN [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38977 {}] assignment.AssignmentManager(1543): Unable to acquire lock for regionNode state=OPEN, location=c7c455b68129,34335,1733578035593, table=TestLogRolling-testLogRolling, region=977deab04a51d2e8e101e5c7f7816b2b. It is likely that another thread is currently holding the lock. To avoid deadlock, skip execution for now. 
2024-12-07T13:27:31,883 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=13, resume processing ppid=11 2024-12-07T13:27:31,883 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=13, ppid=11, state=SUCCESS, hasLock=false; OpenRegionProcedure 977deab04a51d2e8e101e5c7f7816b2b, server=c7c455b68129,34335,1733578035593 in 264 msec 2024-12-07T13:27:31,885 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=977deab04a51d2e8e101e5c7f7816b2b, ASSIGN in 434 msec 2024-12-07T13:27:31,889 DEBUG [StoreOpener-85a851f2688c52b461cdf08f415e962c-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/85a851f2688c52b461cdf08f415e962c/info/085f4f8c7f6347d48dc94f1ef2666f12.82e88219ec4ec4a4fff2a6f409a36e45->hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/82e88219ec4ec4a4fff2a6f409a36e45/info/085f4f8c7f6347d48dc94f1ef2666f12-bottom 2024-12-07T13:27:31,889 INFO [StoreOpener-85a851f2688c52b461cdf08f415e962c-1 {}] regionserver.HStore(327): Store=85a851f2688c52b461cdf08f415e962c/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T13:27:31,890 DEBUG [RS_OPEN_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1038): replaying wal for 85a851f2688c52b461cdf08f415e962c 2024-12-07T13:27:31,890 DEBUG [RS_OPEN_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/85a851f2688c52b461cdf08f415e962c 2024-12-07T13:27:31,892 DEBUG [RS_OPEN_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/85a851f2688c52b461cdf08f415e962c 2024-12-07T13:27:31,892 DEBUG [RS_OPEN_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1048): stopping wal replay for 85a851f2688c52b461cdf08f415e962c 2024-12-07T13:27:31,892 DEBUG [RS_OPEN_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1060): Cleaning up temporary data for 85a851f2688c52b461cdf08f415e962c 2024-12-07T13:27:31,894 DEBUG [RS_OPEN_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1093): writing seq id for 85a851f2688c52b461cdf08f415e962c 2024-12-07T13:27:31,894 INFO [RS:0;c7c455b68129:34335-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 977deab04a51d2e8e101e5c7f7816b2b#info#compaction#67 average throughput is 35.92 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-07T13:27:31,894 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/hbase/meta/1588230740/.tmp/info/f0d85069aad646b6ae30dfb56a9f58d8 is 193, key is TestLogRolling-testLogRolling,row0062,1733578051096.977deab04a51d2e8e101e5c7f7816b2b./info:regioninfo/1733578051810/Put/seqid=0 2024-12-07T13:27:31,894 DEBUG [RS:0;c7c455b68129:34335-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b/.tmp/info/e9c6ca7aa71e48858ee89145561c8651 is 1080, key is row0062/info:/1733578048974/Put/seqid=0 2024-12-07T13:27:31,895 INFO [RS_OPEN_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1114): Opened 85a851f2688c52b461cdf08f415e962c; next sequenceid=131; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=833046, jitterRate=0.05927295982837677}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-07T13:27:31,895 DEBUG [RS_OPEN_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 85a851f2688c52b461cdf08f415e962c 2024-12-07T13:27:31,895 DEBUG [RS_OPEN_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1006): Region open journal for 85a851f2688c52b461cdf08f415e962c: Running coprocessor pre-open hook at 1733578051811Writing region info on filesystem at 1733578051811Initializing all the Stores at 1733578051811Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733578051812 (+1 ms)Cleaning up temporary data from old regions at 1733578051892 (+80 ms)Running coprocessor post-open hooks at 1733578051895 (+3 ms)Region opened successfully at 1733578051895 2024-12-07T13:27:31,896 INFO [RS_OPEN_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,,1733578051096.85a851f2688c52b461cdf08f415e962c., pid=12, masterSystemTime=1733578051764 2024-12-07T13:27:31,896 DEBUG [RS_OPEN_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.CompactSplit(403): Add compact mark for store 85a851f2688c52b461cdf08f415e962c:info, priority=-2147483648, current under compaction store size is 2 2024-12-07T13:27:31,896 DEBUG [RS_OPEN_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: Opening Region; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T13:27:31,896 DEBUG [RS:0;c7c455b68129:34335-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 1 store files, 0 compacting, 1 eligible, 16 blocking 2024-12-07T13:27:31,897 INFO [RS:0;c7c455b68129:34335-longCompactions-0 {}] regionserver.HStore(1527): 
Keeping/Overriding Compaction request priority to -2147482648 for CF info since it belongs to recently split daughter region TestLogRolling-testLogRolling,,1733578051096.85a851f2688c52b461cdf08f415e962c. 2024-12-07T13:27:31,897 DEBUG [RS:0;c7c455b68129:34335-longCompactions-0 {}] regionserver.HStore(1541): 85a851f2688c52b461cdf08f415e962c/info is initiating minor compaction (all files) 2024-12-07T13:27:31,897 INFO [RS:0;c7c455b68129:34335-longCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 85a851f2688c52b461cdf08f415e962c/info in TestLogRolling-testLogRolling,,1733578051096.85a851f2688c52b461cdf08f415e962c. 2024-12-07T13:27:31,897 INFO [RS:0;c7c455b68129:34335-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/85a851f2688c52b461cdf08f415e962c/info/085f4f8c7f6347d48dc94f1ef2666f12.82e88219ec4ec4a4fff2a6f409a36e45->hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/82e88219ec4ec4a4fff2a6f409a36e45/info/085f4f8c7f6347d48dc94f1ef2666f12-bottom] into tmpdir=hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/85a851f2688c52b461cdf08f415e962c/.tmp, totalSize=86.3 K 2024-12-07T13:27:31,897 DEBUG [RS:0;c7c455b68129:34335-longCompactions-0 {}] compactions.Compactor(225): Compacting 085f4f8c7f6347d48dc94f1ef2666f12.82e88219ec4ec4a4fff2a6f409a36e45, keycount=38, bloomtype=ROW, size=86.3 K, encoding=NONE, compression=NONE, seqNum=101, earliestPutTs=1733578046839 2024-12-07T13:27:31,898 DEBUG [RS_OPEN_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,,1733578051096.85a851f2688c52b461cdf08f415e962c. 2024-12-07T13:27:31,898 INFO [RS_OPEN_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,,1733578051096.85a851f2688c52b461cdf08f415e962c. 
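With both daughters (85a851f2688c52b461cdf08f415e962c and 977deab04a51d2e8e101e5c7f7816b2b) now open, a client that still holds the parent's cached location runs into the NotServingRegionException retries recorded further below and has to re-resolve the row to a daughter region. A minimal sketch of forcing that re-resolution with the public RegionLocator API (row key copied from the log; the rest is illustrative):

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.RegionLocator;
    import org.apache.hadoop.hbase.util.Bytes;

    public class RefreshRegionLocation {
      public static void main(String[] args) throws IOException {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             RegionLocator locator =
                 conn.getRegionLocator(TableName.valueOf("TestLogRolling-testLogRolling"))) {
          // reload=true bypasses the client's cached location for this row,
          // so the lookup returns one of the daughter regions instead of the
          // now-offline parent 82e88219ec4ec4a4fff2a6f409a36e45.
          HRegionLocation loc =
              locator.getRegionLocation(Bytes.toBytes("row0062"), true);
          System.out.println(loc.getRegion().getRegionNameAsString()
              + " on " + loc.getServerName());
        }
      }
    }

In normal use the client library does this refresh automatically after a NotServingRegionException, which is what the AsyncRegionLocatorHelper entries below record.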
2024-12-07T13:27:31,899 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=10 updating hbase:meta row=85a851f2688c52b461cdf08f415e962c, regionState=OPEN, openSeqNum=131, regionLocation=c7c455b68129,34335,1733578035593 2024-12-07T13:27:31,900 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32921 is added to blk_1073741853_1029 (size=42984) 2024-12-07T13:27:31,900 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34771 is added to blk_1073741853_1029 (size=42984) 2024-12-07T13:27:31,901 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32921 is added to blk_1073741854_1030 (size=9882) 2024-12-07T13:27:31,901 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34771 is added to blk_1073741854_1030 (size=9882) 2024-12-07T13:27:31,902 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.95 KB at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/hbase/meta/1588230740/.tmp/info/f0d85069aad646b6ae30dfb56a9f58d8 2024-12-07T13:27:31,902 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=12, ppid=10, state=RUNNABLE, hasLock=false; OpenRegionProcedure 85a851f2688c52b461cdf08f415e962c, server=c7c455b68129,34335,1733578035593 because future has completed 2024-12-07T13:27:31,905 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=12, resume processing ppid=10 2024-12-07T13:27:31,905 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=12, ppid=10, state=SUCCESS, hasLock=false; OpenRegionProcedure 85a851f2688c52b461cdf08f415e962c, server=c7c455b68129,34335,1733578035593 in 295 msec 2024-12-07T13:27:31,906 DEBUG [RS:0;c7c455b68129:34335-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b/.tmp/info/e9c6ca7aa71e48858ee89145561c8651 as hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b/info/e9c6ca7aa71e48858ee89145561c8651 2024-12-07T13:27:31,908 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=10, resume processing ppid=7 2024-12-07T13:27:31,908 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=85a851f2688c52b461cdf08f415e962c, ASSIGN in 456 msec 2024-12-07T13:27:31,911 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=82e88219ec4ec4a4fff2a6f409a36e45, daughterA=85a851f2688c52b461cdf08f415e962c, daughterB=977deab04a51d2e8e101e5c7f7816b2b in 812 msec 2024-12-07T13:27:31,914 INFO [RS:0;c7c455b68129:34335-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 977deab04a51d2e8e101e5c7f7816b2b/info of 977deab04a51d2e8e101e5c7f7816b2b into e9c6ca7aa71e48858ee89145561c8651(size=42.0 K), total size for store is 42.0 K. 
This selection was in queue for 0sec, and took 0sec to execute. 2024-12-07T13:27:31,914 DEBUG [RS:0;c7c455b68129:34335-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 977deab04a51d2e8e101e5c7f7816b2b: 2024-12-07T13:27:31,914 INFO [RS:0;c7c455b68129:34335-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1733578051096.977deab04a51d2e8e101e5c7f7816b2b., storeName=977deab04a51d2e8e101e5c7f7816b2b/info, priority=13, startTime=1733578051807; duration=0sec 2024-12-07T13:27:31,914 DEBUG [RS:0;c7c455b68129:34335-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T13:27:31,914 DEBUG [RS:0;c7c455b68129:34335-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 977deab04a51d2e8e101e5c7f7816b2b:info 2024-12-07T13:27:31,920 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/hbase/meta/1588230740/.tmp/ns/9b198faf61784ea6be5b49347702f7f8 is 43, key is default/ns:d/1733578036699/Put/seqid=0 2024-12-07T13:27:31,921 INFO [RS:0;c7c455b68129:34335-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 85a851f2688c52b461cdf08f415e962c#info#compaction#69 average throughput is 20.87 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-07T13:27:31,921 DEBUG [RS:0;c7c455b68129:34335-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/85a851f2688c52b461cdf08f415e962c/.tmp/info/4195273abbb34ab69e7b015199f9f8b3 is 1080, key is row0001/info:/1733578046839/Put/seqid=0 2024-12-07T13:27:31,926 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32921 is added to blk_1073741855_1031 (size=5153) 2024-12-07T13:27:31,926 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34771 is added to blk_1073741855_1031 (size=5153) 2024-12-07T13:27:31,926 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/hbase/meta/1588230740/.tmp/ns/9b198faf61784ea6be5b49347702f7f8 2024-12-07T13:27:31,927 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34771 is added to blk_1073741856_1032 (size=70862) 2024-12-07T13:27:31,928 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32921 is added to blk_1073741856_1032 (size=70862) 2024-12-07T13:27:31,933 DEBUG [RS:0;c7c455b68129:34335-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/85a851f2688c52b461cdf08f415e962c/.tmp/info/4195273abbb34ab69e7b015199f9f8b3 as 
hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/85a851f2688c52b461cdf08f415e962c/info/4195273abbb34ab69e7b015199f9f8b3 2024-12-07T13:27:31,940 INFO [RS:0;c7c455b68129:34335-longCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 1 (all) file(s) in 85a851f2688c52b461cdf08f415e962c/info of 85a851f2688c52b461cdf08f415e962c into 4195273abbb34ab69e7b015199f9f8b3(size=69.2 K), total size for store is 69.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-07T13:27:31,940 DEBUG [RS:0;c7c455b68129:34335-longCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 85a851f2688c52b461cdf08f415e962c: 2024-12-07T13:27:31,940 INFO [RS:0;c7c455b68129:34335-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1733578051096.85a851f2688c52b461cdf08f415e962c., storeName=85a851f2688c52b461cdf08f415e962c/info, priority=15, startTime=1733578051896; duration=0sec 2024-12-07T13:27:31,940 DEBUG [RS:0;c7c455b68129:34335-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T13:27:31,940 DEBUG [RS:0;c7c455b68129:34335-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 85a851f2688c52b461cdf08f415e962c:info 2024-12-07T13:27:31,945 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/hbase/meta/1588230740/.tmp/table/909f9bb51ddb4e6d98d991b9d06ad1b0 is 65, key is TestLogRolling-testLogRolling/table:state/1733578037117/Put/seqid=0 2024-12-07T13:27:31,949 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34771 is added to blk_1073741857_1033 (size=5340) 2024-12-07T13:27:31,949 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32921 is added to blk_1073741857_1033 (size=5340) 2024-12-07T13:27:31,950 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=122 B at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/hbase/meta/1588230740/.tmp/table/909f9bb51ddb4e6d98d991b9d06ad1b0 2024-12-07T13:27:31,955 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/hbase/meta/1588230740/.tmp/info/f0d85069aad646b6ae30dfb56a9f58d8 as hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/hbase/meta/1588230740/info/f0d85069aad646b6ae30dfb56a9f58d8 2024-12-07T13:27:31,960 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/hbase/meta/1588230740/info/f0d85069aad646b6ae30dfb56a9f58d8, entries=30, sequenceid=17, filesize=9.7 K 2024-12-07T13:27:31,961 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/hbase/meta/1588230740/.tmp/ns/9b198faf61784ea6be5b49347702f7f8 as 
hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/hbase/meta/1588230740/ns/9b198faf61784ea6be5b49347702f7f8 2024-12-07T13:27:31,966 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/hbase/meta/1588230740/ns/9b198faf61784ea6be5b49347702f7f8, entries=2, sequenceid=17, filesize=5.0 K 2024-12-07T13:27:31,967 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/hbase/meta/1588230740/.tmp/table/909f9bb51ddb4e6d98d991b9d06ad1b0 as hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/hbase/meta/1588230740/table/909f9bb51ddb4e6d98d991b9d06ad1b0 2024-12-07T13:27:31,971 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/hbase/meta/1588230740/table/909f9bb51ddb4e6d98d991b9d06ad1b0, entries=2, sequenceid=17, filesize=5.2 K 2024-12-07T13:27:31,972 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~5.15 KB/5269, heapSize ~8.70 KB/8912, currentSize=670 B/670 for 1588230740 in 159ms, sequenceid=17, compaction requested=false 2024-12-07T13:27:31,972 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-12-07T13:27:32,619 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,43699,1733577893481/c7c455b68129%2C43699%2C1733577893481.meta.1733577894498.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:27:32,619 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,42863,1733577894737/c7c455b68129%2C42863%2C1733577894737.1733577894972 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:27:33,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34335 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.3:51022 deadline: 1733578063059, exception=org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1733578036732.82e88219ec4ec4a4fff2a6f409a36e45. 
is not online on c7c455b68129,34335,1733578035593 2024-12-07T13:27:33,086 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,,1733578036732.82e88219ec4ec4a4fff2a6f409a36e45., hostname=c7c455b68129,34335,1733578035593, seqNum=2 , the old value is region=TestLogRolling-testLogRolling,,1733578036732.82e88219ec4ec4a4fff2a6f409a36e45., hostname=c7c455b68129,34335,1733578035593, seqNum=2, error=org.apache.hadoop.hbase.NotServingRegionException: org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1733578036732.82e88219ec4ec4a4fff2a6f409a36e45. is not online on c7c455b68129,34335,1733578035593 at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegionByEncodedName(HRegionServer.java:3186) at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegion(HRegionServer.java:3164) at org.apache.hadoop.hbase.regionserver.RSRpcServices.getRegion(RSRpcServices.java:1413) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2943) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-07T13:27:33,087 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,,1733578036732.82e88219ec4ec4a4fff2a6f409a36e45., hostname=c7c455b68129,34335,1733578035593, seqNum=2 is org.apache.hadoop.hbase.NotServingRegionException: org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1733578036732.82e88219ec4ec4a4fff2a6f409a36e45. is not online on c7c455b68129,34335,1733578035593 at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegionByEncodedName(HRegionServer.java:3186) at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegion(HRegionServer.java:3164) at org.apache.hadoop.hbase.regionserver.RSRpcServices.getRegion(RSRpcServices.java:1413) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2943) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-07T13:27:33,087 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(88): Try removing region=TestLogRolling-testLogRolling,,1733578036732.82e88219ec4ec4a4fff2a6f409a36e45., hostname=c7c455b68129,34335,1733578035593, seqNum=2 from cache 2024-12-07T13:27:33,620 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,42863,1733577894737/c7c455b68129%2C42863%2C1733577894737.1733577894972 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:27:33,620 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,43699,1733577893481/c7c455b68129%2C43699%2C1733577893481.meta.1733577894498.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:27:34,621 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,43699,1733577893481/c7c455b68129%2C43699%2C1733577893481.meta.1733577894498.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:27:34,621 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,42863,1733577894737/c7c455b68129%2C42863%2C1733577894737.1733577894972 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:27:35,622 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,42863,1733577894737/c7c455b68129%2C42863%2C1733577894737.1733577894972 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:27:35,622 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,43699,1733577893481/c7c455b68129%2C43699%2C1733577893481.meta.1733577894498.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:27:36,330 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T13:27:36,330 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T13:27:36,330 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T13:27:36,331 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T13:27:36,332 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T13:27:36,332 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T13:27:36,334 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T13:27:36,334 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T13:27:36,350 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T13:27:36,351 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T13:27:36,351 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T13:27:36,351 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T13:27:36,351 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T13:27:36,351 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T13:27:36,354 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T13:27:36,354 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T13:27:36,354 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T13:27:36,356 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T13:27:36,623 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,42863,1733577894737/c7c455b68129%2C42863%2C1733577894737.1733577894972 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:27:36,623 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,43699,1733577893481/c7c455b68129%2C43699%2C1733577893481.meta.1733577894498.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:27:36,861 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-07T13:27:36,864 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T13:27:36,864 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T13:27:36,864 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T13:27:36,865 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T13:27:36,865 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T13:27:36,865 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T13:27:36,866 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T13:27:36,867 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T13:27:36,885 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T13:27:36,885 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T13:27:36,885 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T13:27:36,886 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T13:27:36,886 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T13:27:36,886 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T13:27:36,889 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T13:27:36,889 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T13:27:36,889 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T13:27:36,891 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-07T13:27:37,624 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,43699,1733577893481/c7c455b68129%2C43699%2C1733577893481.meta.1733577894498.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-07T13:27:37,624 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,42863,1733577894737/c7c455b68129%2C42863%2C1733577894737.1733577894972 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:27:38,624 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,42863,1733577894737/c7c455b68129%2C42863%2C1733577894737.1733577894972 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:27:38,624 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,43699,1733577893481/c7c455b68129%2C43699%2C1733577893481.meta.1733577894498.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-07T13:27:39,626 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,42863,1733577894737/c7c455b68129%2C42863%2C1733577894737.1733577894972 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:27:39,626 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,43699,1733577893481/c7c455b68129%2C43699%2C1733577893481.meta.1733577894498.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:27:40,627 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,42863,1733577894737/c7c455b68129%2C42863%2C1733577894737.1733577894972 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-07T13:27:40,627 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,43699,1733577893481/c7c455b68129%2C43699%2C1733577893481.meta.1733577894498.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:27:41,629 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,42863,1733577894737/c7c455b68129%2C42863%2C1733577894737.1733577894972 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:27:41,629 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,43699,1733577893481/c7c455b68129%2C43699%2C1733577893481.meta.1733577894498.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-07T13:27:42,630 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,43699,1733577893481/c7c455b68129%2C43699%2C1733577893481.meta.1733577894498.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:27:42,630 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,42863,1733577894737/c7c455b68129%2C42863%2C1733577894737.1733577894972 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:27:43,138 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRolling', row='row0097', locateType=CURRENT is [region=TestLogRolling-testLogRolling,row0062,1733578051096.977deab04a51d2e8e101e5c7f7816b2b., hostname=c7c455b68129,34335,1733578035593, seqNum=131] 2024-12-07T13:27:43,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34335 {}] regionserver.HRegion(8855): Flush requested on 977deab04a51d2e8e101e5c7f7816b2b 2024-12-07T13:27:43,149 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 977deab04a51d2e8e101e5c7f7816b2b 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-07T13:27:43,153 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b/.tmp/info/5a0955005bbd4d928f921759af2a1b27 is 1080, key is row0097/info:/1733578063139/Put/seqid=0 2024-12-07T13:27:43,157 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34771 is added to blk_1073741858_1034 (size=12516) 2024-12-07T13:27:43,158 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32921 is added to blk_1073741858_1034 (size=12516) 2024-12-07T13:27:43,158 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=141 (bloomFilter=true), to=hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b/.tmp/info/5a0955005bbd4d928f921759af2a1b27 2024-12-07T13:27:43,165 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b/.tmp/info/5a0955005bbd4d928f921759af2a1b27 as hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b/info/5a0955005bbd4d928f921759af2a1b27 2024-12-07T13:27:43,171 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b/info/5a0955005bbd4d928f921759af2a1b27, entries=7, sequenceid=141, filesize=12.2 K 2024-12-07T13:27:43,172 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=10.51 KB/10760 for 977deab04a51d2e8e101e5c7f7816b2b in 23ms, sequenceid=141, compaction requested=false 2024-12-07T13:27:43,172 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 977deab04a51d2e8e101e5c7f7816b2b: 2024-12-07T13:27:43,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34335 {}] regionserver.HRegion(8855): Flush requested on 977deab04a51d2e8e101e5c7f7816b2b 2024-12-07T13:27:43,174 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 977deab04a51d2e8e101e5c7f7816b2b 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB 2024-12-07T13:27:43,178 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b/.tmp/info/b50f590f4f0b43f3bd3b39a077ee3ece is 1080, key is row0104/info:/1733578063150/Put/seqid=0 2024-12-07T13:27:43,182 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32921 is added to blk_1073741859_1035 (size=17906) 2024-12-07T13:27:43,183 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34771 is added to blk_1073741859_1035 (size=17906) 2024-12-07T13:27:43,183 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=156 (bloomFilter=true), to=hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b/.tmp/info/b50f590f4f0b43f3bd3b39a077ee3ece 2024-12-07T13:27:43,190 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b/.tmp/info/b50f590f4f0b43f3bd3b39a077ee3ece as hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b/info/b50f590f4f0b43f3bd3b39a077ee3ece 2024-12-07T13:27:43,196 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b/info/b50f590f4f0b43f3bd3b39a077ee3ece, entries=12, sequenceid=156, filesize=17.5 K 2024-12-07T13:27:43,196 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=11.56 KB/11836 for 977deab04a51d2e8e101e5c7f7816b2b in 22ms, sequenceid=156, compaction requested=true 2024-12-07T13:27:43,197 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 977deab04a51d2e8e101e5c7f7816b2b: 2024-12-07T13:27:43,197 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 977deab04a51d2e8e101e5c7f7816b2b:info, priority=-2147483648, current under compaction store size is 1 
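The flush cycle logged above follows a two-step commit: the memstore is first written out as an HFile under the region's .tmp directory and only then made visible by renaming it into the info store (the HRegionFileSystem(442) "Committing ... as ..." entries), after which a compaction is requested once three store files have accumulated. The reported sizes are self-consistent: 12912 bytes is ~12.61 KB and 14064 bytes is ~13.73 KB. Below is a minimal, self-contained sketch of that write-under-temp-then-rename idiom using the Hadoop FileSystem API; the class name and paths are placeholders, and this is not the actual HBase HRegionFileSystem code.

    // Minimal sketch of the "write under .tmp, then rename to commit" idiom that the
    // flush entries above are logging. Class name and paths are placeholders; this is
    // not HBase's HRegionFileSystem implementation.
    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class CommitByRename {
      public static void main(String[] args) throws IOException {
        FileSystem fs = FileSystem.get(new Configuration());

        // A flusher would first write the complete HFile here (writing elided) ...
        Path tmpFile = new Path("/data/default/SomeTable/region/.tmp/info/hfile-0001");
        // ... and only then publish it to readers with a single rename, so a reader
        // never observes a partially written store file.
        Path storeFile = new Path("/data/default/SomeTable/region/info/hfile-0001");

        if (!fs.rename(tmpFile, storeFile)) {
          throw new IOException("Commit failed: " + tmpFile + " -> " + storeFile);
        }
      }
    }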
2024-12-07T13:27:43,197 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T13:27:43,197 DEBUG [RS:0;c7c455b68129:34335-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-07T13:27:43,198 DEBUG [RS:0;c7c455b68129:34335-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 73406 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-07T13:27:43,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34335 {}] regionserver.HRegion(8855): Flush requested on 977deab04a51d2e8e101e5c7f7816b2b 2024-12-07T13:27:43,198 DEBUG [RS:0;c7c455b68129:34335-longCompactions-0 {}] regionserver.HStore(1541): 977deab04a51d2e8e101e5c7f7816b2b/info is initiating minor compaction (all files) 2024-12-07T13:27:43,198 INFO [RS:0;c7c455b68129:34335-longCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 977deab04a51d2e8e101e5c7f7816b2b/info in TestLogRolling-testLogRolling,row0062,1733578051096.977deab04a51d2e8e101e5c7f7816b2b. 2024-12-07T13:27:43,198 INFO [RS:0;c7c455b68129:34335-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b/info/e9c6ca7aa71e48858ee89145561c8651, hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b/info/5a0955005bbd4d928f921759af2a1b27, hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b/info/b50f590f4f0b43f3bd3b39a077ee3ece] into tmpdir=hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b/.tmp, totalSize=71.7 K 2024-12-07T13:27:43,198 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 977deab04a51d2e8e101e5c7f7816b2b 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB 2024-12-07T13:27:43,199 DEBUG [RS:0;c7c455b68129:34335-longCompactions-0 {}] compactions.Compactor(225): Compacting e9c6ca7aa71e48858ee89145561c8651, keycount=35, bloomtype=ROW, size=42.0 K, encoding=NONE, compression=NONE, seqNum=127, earliestPutTs=1733578048974 2024-12-07T13:27:43,199 DEBUG [RS:0;c7c455b68129:34335-longCompactions-0 {}] compactions.Compactor(225): Compacting 5a0955005bbd4d928f921759af2a1b27, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=141, earliestPutTs=1733578063139 2024-12-07T13:27:43,199 DEBUG [RS:0;c7c455b68129:34335-longCompactions-0 {}] compactions.Compactor(225): Compacting b50f590f4f0b43f3bd3b39a077ee3ece, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=156, earliestPutTs=1733578063150 2024-12-07T13:27:43,202 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b/.tmp/info/4a414b4eeb234cfbac642608c759b88c is 1080, key is row0116/info:/1733578063175/Put/seqid=0 
2024-12-07T13:27:43,210 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34771 is added to blk_1073741860_1036 (size=17906) 2024-12-07T13:27:43,210 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32921 is added to blk_1073741860_1036 (size=17906) 2024-12-07T13:27:43,211 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=171 (bloomFilter=true), to=hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b/.tmp/info/4a414b4eeb234cfbac642608c759b88c 2024-12-07T13:27:43,213 INFO [RS:0;c7c455b68129:34335-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 977deab04a51d2e8e101e5c7f7816b2b#info#compaction#75 average throughput is 55.41 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-07T13:27:43,214 DEBUG [RS:0;c7c455b68129:34335-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b/.tmp/info/235e2ceee5954cfe9eca59ad06e08946 is 1080, key is row0062/info:/1733578048974/Put/seqid=0 2024-12-07T13:27:43,216 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b/.tmp/info/4a414b4eeb234cfbac642608c759b88c as hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b/info/4a414b4eeb234cfbac642608c759b88c 2024-12-07T13:27:43,218 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34771 is added to blk_1073741861_1037 (size=63636) 2024-12-07T13:27:43,219 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32921 is added to blk_1073741861_1037 (size=63636) 2024-12-07T13:27:43,222 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b/info/4a414b4eeb234cfbac642608c759b88c, entries=12, sequenceid=171, filesize=17.5 K 2024-12-07T13:27:43,223 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=1.05 KB/1076 for 977deab04a51d2e8e101e5c7f7816b2b in 24ms, sequenceid=171, compaction requested=false 2024-12-07T13:27:43,223 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 977deab04a51d2e8e101e5c7f7816b2b: 2024-12-07T13:27:43,224 DEBUG [RS:0;c7c455b68129:34335-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b/.tmp/info/235e2ceee5954cfe9eca59ad06e08946 as 
hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b/info/235e2ceee5954cfe9eca59ad06e08946 2024-12-07T13:27:43,229 INFO [RS:0;c7c455b68129:34335-longCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 977deab04a51d2e8e101e5c7f7816b2b/info of 977deab04a51d2e8e101e5c7f7816b2b into 235e2ceee5954cfe9eca59ad06e08946(size=62.1 K), total size for store is 79.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-07T13:27:43,229 DEBUG [RS:0;c7c455b68129:34335-longCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 977deab04a51d2e8e101e5c7f7816b2b: 2024-12-07T13:27:43,229 INFO [RS:0;c7c455b68129:34335-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1733578051096.977deab04a51d2e8e101e5c7f7816b2b., storeName=977deab04a51d2e8e101e5c7f7816b2b/info, priority=13, startTime=1733578063197; duration=0sec 2024-12-07T13:27:43,229 DEBUG [RS:0;c7c455b68129:34335-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T13:27:43,229 DEBUG [RS:0;c7c455b68129:34335-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 977deab04a51d2e8e101e5c7f7816b2b:info 2024-12-07T13:27:43,631 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,42863,1733577894737/c7c455b68129%2C42863%2C1733577894737.1733577894972 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:27:43,631 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,43699,1733577893481/c7c455b68129%2C43699%2C1733577893481.meta.1733577894498.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:27:44,633 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,42863,1733577894737/c7c455b68129%2C42863%2C1733577894737.1733577894972 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:27:44,633 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,43699,1733577893481/c7c455b68129%2C43699%2C1733577893481.meta.1733577894498.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:27:45,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34335 {}] regionserver.HRegion(8855): Flush requested on 977deab04a51d2e8e101e5c7f7816b2b 2024-12-07T13:27:45,216 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 977deab04a51d2e8e101e5c7f7816b2b 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-07T13:27:45,220 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b/.tmp/info/7bee0fc6f51d4917a1db669bb75f9664 is 1080, key is row0128/info:/1733578063199/Put/seqid=0 2024-12-07T13:27:45,224 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34771 is added to blk_1073741862_1038 (size=12516) 2024-12-07T13:27:45,224 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32921 is added to blk_1073741862_1038 (size=12516) 2024-12-07T13:27:45,225 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=182 (bloomFilter=true), to=hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b/.tmp/info/7bee0fc6f51d4917a1db669bb75f9664 2024-12-07T13:27:45,231 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b/.tmp/info/7bee0fc6f51d4917a1db669bb75f9664 as hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b/info/7bee0fc6f51d4917a1db669bb75f9664 2024-12-07T13:27:45,235 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b/info/7bee0fc6f51d4917a1db669bb75f9664, entries=7, sequenceid=182, filesize=12.2 K 2024-12-07T13:27:45,236 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=9.46 KB/9684 for 977deab04a51d2e8e101e5c7f7816b2b in 20ms, sequenceid=182, compaction requested=true 2024-12-07T13:27:45,236 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 977deab04a51d2e8e101e5c7f7816b2b: 2024-12-07T13:27:45,236 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 977deab04a51d2e8e101e5c7f7816b2b:info, priority=-2147483648, current under compaction store size is 1 2024-12-07T13:27:45,236 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T13:27:45,236 DEBUG [RS:0;c7c455b68129:34335-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-07T13:27:45,237 DEBUG [RS:0;c7c455b68129:34335-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 94058 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-07T13:27:45,237 DEBUG [RS:0;c7c455b68129:34335-shortCompactions-0 {}] regionserver.HStore(1541): 977deab04a51d2e8e101e5c7f7816b2b/info is initiating minor compaction (all files) 2024-12-07T13:27:45,237 INFO [RS:0;c7c455b68129:34335-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 977deab04a51d2e8e101e5c7f7816b2b/info in TestLogRolling-testLogRolling,row0062,1733578051096.977deab04a51d2e8e101e5c7f7816b2b. 2024-12-07T13:27:45,237 INFO [RS:0;c7c455b68129:34335-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b/info/235e2ceee5954cfe9eca59ad06e08946, hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b/info/4a414b4eeb234cfbac642608c759b88c, hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b/info/7bee0fc6f51d4917a1db669bb75f9664] into tmpdir=hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b/.tmp, totalSize=91.9 K 2024-12-07T13:27:45,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34335 {}] regionserver.HRegion(8855): Flush requested on 977deab04a51d2e8e101e5c7f7816b2b 2024-12-07T13:27:45,238 DEBUG [RS:0;c7c455b68129:34335-shortCompactions-0 {}] compactions.Compactor(225): Compacting 235e2ceee5954cfe9eca59ad06e08946, keycount=54, bloomtype=ROW, size=62.1 K, encoding=NONE, compression=NONE, seqNum=156, earliestPutTs=1733578048974 2024-12-07T13:27:45,238 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 977deab04a51d2e8e101e5c7f7816b2b 1/1 column families, dataSize=11.56 KB heapSize=12.63 KB 2024-12-07T13:27:45,238 DEBUG [RS:0;c7c455b68129:34335-shortCompactions-0 {}] compactions.Compactor(225): Compacting 4a414b4eeb234cfbac642608c759b88c, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=171, earliestPutTs=1733578063175 2024-12-07T13:27:45,238 DEBUG [RS:0;c7c455b68129:34335-shortCompactions-0 {}] compactions.Compactor(225): Compacting 7bee0fc6f51d4917a1db669bb75f9664, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=182, earliestPutTs=1733578063199 2024-12-07T13:27:45,241 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b/.tmp/info/4eeaac854591462499d5eb516b17e1ff is 1080, key is row0135/info:/1733578065217/Put/seqid=0 2024-12-07T13:27:45,253 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32921 is added to 
blk_1073741863_1039 (size=16828) 2024-12-07T13:27:45,253 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34771 is added to blk_1073741863_1039 (size=16828) 2024-12-07T13:27:45,254 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.56 KB at sequenceid=196 (bloomFilter=true), to=hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b/.tmp/info/4eeaac854591462499d5eb516b17e1ff 2024-12-07T13:27:45,255 INFO [RS:0;c7c455b68129:34335-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 977deab04a51d2e8e101e5c7f7816b2b#info#compaction#78 average throughput is 37.45 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-07T13:27:45,256 DEBUG [RS:0;c7c455b68129:34335-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b/.tmp/info/35791c8115cb4dd2bbb4ed11d5e7a762 is 1080, key is row0062/info:/1733578048974/Put/seqid=0 2024-12-07T13:27:45,260 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b/.tmp/info/4eeaac854591462499d5eb516b17e1ff as hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b/info/4eeaac854591462499d5eb516b17e1ff 2024-12-07T13:27:45,272 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32921 is added to blk_1073741864_1040 (size=84293) 2024-12-07T13:27:45,272 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34771 is added to blk_1073741864_1040 (size=84293) 2024-12-07T13:27:45,277 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b/info/4eeaac854591462499d5eb516b17e1ff, entries=11, sequenceid=196, filesize=16.4 K 2024-12-07T13:27:45,278 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~11.56 KB/11836, heapSize ~12.61 KB/12912, currentSize=13.66 KB/13988 for 977deab04a51d2e8e101e5c7f7816b2b in 39ms, sequenceid=196, compaction requested=false 2024-12-07T13:27:45,278 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 977deab04a51d2e8e101e5c7f7816b2b: 2024-12-07T13:27:45,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34335 {}] regionserver.HRegion(8855): Flush requested on 977deab04a51d2e8e101e5c7f7816b2b 2024-12-07T13:27:45,278 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 977deab04a51d2e8e101e5c7f7816b2b 1/1 column families, dataSize=14.71 KB heapSize=16 KB 2024-12-07T13:27:45,279 DEBUG [RS:0;c7c455b68129:34335-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b/.tmp/info/35791c8115cb4dd2bbb4ed11d5e7a762 as hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b/info/35791c8115cb4dd2bbb4ed11d5e7a762 2024-12-07T13:27:45,282 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b/.tmp/info/c463461b64bc4cb591d1fc5b771e849f is 1080, key is row0146/info:/1733578065239/Put/seqid=0 2024-12-07T13:27:45,286 INFO [RS:0;c7c455b68129:34335-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 977deab04a51d2e8e101e5c7f7816b2b/info of 977deab04a51d2e8e101e5c7f7816b2b into 35791c8115cb4dd2bbb4ed11d5e7a762(size=82.3 K), total size for store is 98.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-07T13:27:45,286 DEBUG [RS:0;c7c455b68129:34335-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 977deab04a51d2e8e101e5c7f7816b2b: 2024-12-07T13:27:45,287 INFO [RS:0;c7c455b68129:34335-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1733578051096.977deab04a51d2e8e101e5c7f7816b2b., storeName=977deab04a51d2e8e101e5c7f7816b2b/info, priority=13, startTime=1733578065236; duration=0sec 2024-12-07T13:27:45,287 DEBUG [RS:0;c7c455b68129:34335-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T13:27:45,287 DEBUG [RS:0;c7c455b68129:34335-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 977deab04a51d2e8e101e5c7f7816b2b:info 2024-12-07T13:27:45,287 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32921 is added to blk_1073741865_1041 (size=20078) 2024-12-07T13:27:45,288 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34771 is added to blk_1073741865_1041 (size=20078) 2024-12-07T13:27:45,288 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=14.71 KB at sequenceid=213 (bloomFilter=true), to=hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b/.tmp/info/c463461b64bc4cb591d1fc5b771e849f 2024-12-07T13:27:45,293 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b/.tmp/info/c463461b64bc4cb591d1fc5b771e849f as hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b/info/c463461b64bc4cb591d1fc5b771e849f 2024-12-07T13:27:45,298 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b/info/c463461b64bc4cb591d1fc5b771e849f, entries=14, sequenceid=213, filesize=19.6 K 2024-12-07T13:27:45,299 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~14.71 KB/15064, heapSize ~15.98 KB/16368, currentSize=1.05 KB/1076 for 977deab04a51d2e8e101e5c7f7816b2b in 21ms, sequenceid=213, compaction requested=true 2024-12-07T13:27:45,299 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 977deab04a51d2e8e101e5c7f7816b2b: 2024-12-07T13:27:45,299 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 977deab04a51d2e8e101e5c7f7816b2b:info, priority=-2147483648, current under compaction store size is 1 2024-12-07T13:27:45,299 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T13:27:45,299 DEBUG [RS:0;c7c455b68129:34335-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-07T13:27:45,300 DEBUG [RS:0;c7c455b68129:34335-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 121199 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-07T13:27:45,300 DEBUG [RS:0;c7c455b68129:34335-shortCompactions-0 {}] regionserver.HStore(1541): 977deab04a51d2e8e101e5c7f7816b2b/info is initiating minor compaction (all files) 2024-12-07T13:27:45,300 INFO [RS:0;c7c455b68129:34335-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 977deab04a51d2e8e101e5c7f7816b2b/info in TestLogRolling-testLogRolling,row0062,1733578051096.977deab04a51d2e8e101e5c7f7816b2b. 
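The Close-WAL-Writer-0 warnings that recur before and after this point come from RecoverLeaseFSUtils probing whether an old WAL file under hdfs://localhost:35785 is closed: per the stack trace, isFileClosed is reached through reflection and the call keeps failing with "Filesystem closed" because the DFSClient behind that filesystem has already been shut down, so the probe is simply retried about once per second (note the timestamps roughly one second apart). The sketch below is a hypothetical, simplified version of that probe-and-retry pattern, not the actual RecoverLeaseFSUtils code; the attempt count and sleep interval are assumptions, and it needs hadoop-common on the classpath.

    import java.lang.reflect.Method;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    // Hypothetical sketch of the probe-and-retry pattern behind the recurring
    // "Failed invocation" warnings: isFileClosed is resolved reflectively (the base
    // FileSystem class does not declare it) and retried with a pause between attempts.
    public class IsFileClosedProbe {
        static boolean waitUntilClosed(FileSystem fs, Path wal, int attempts, long sleepMs)
                throws InterruptedException {
            Method isFileClosed;
            try {
                isFileClosed = fs.getClass().getMethod("isFileClosed", Path.class);
            } catch (NoSuchMethodException e) {
                return false; // This filesystem implementation has no isFileClosed probe.
            }
            for (int i = 0; i < attempts; i++) {
                try {
                    if ((boolean) isFileClosed.invoke(fs, wal)) {
                        return true; // File is closed; lease recovery can finish.
                    }
                } catch (ReflectiveOperationException e) {
                    // Matches the log: an IOException("Filesystem closed") surfaces as an
                    // InvocationTargetException and is logged as a failed invocation.
                }
                Thread.sleep(sleepMs);
            }
            return false;
        }
    }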
2024-12-07T13:27:45,300 INFO [RS:0;c7c455b68129:34335-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b/info/35791c8115cb4dd2bbb4ed11d5e7a762, hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b/info/4eeaac854591462499d5eb516b17e1ff, hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b/info/c463461b64bc4cb591d1fc5b771e849f] into tmpdir=hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b/.tmp, totalSize=118.4 K 2024-12-07T13:27:45,301 DEBUG [RS:0;c7c455b68129:34335-shortCompactions-0 {}] compactions.Compactor(225): Compacting 35791c8115cb4dd2bbb4ed11d5e7a762, keycount=73, bloomtype=ROW, size=82.3 K, encoding=NONE, compression=NONE, seqNum=182, earliestPutTs=1733578048974 2024-12-07T13:27:45,301 DEBUG [RS:0;c7c455b68129:34335-shortCompactions-0 {}] compactions.Compactor(225): Compacting 4eeaac854591462499d5eb516b17e1ff, keycount=11, bloomtype=ROW, size=16.4 K, encoding=NONE, compression=NONE, seqNum=196, earliestPutTs=1733578065217 2024-12-07T13:27:45,301 DEBUG [RS:0;c7c455b68129:34335-shortCompactions-0 {}] compactions.Compactor(225): Compacting c463461b64bc4cb591d1fc5b771e849f, keycount=14, bloomtype=ROW, size=19.6 K, encoding=NONE, compression=NONE, seqNum=213, earliestPutTs=1733578065239 2024-12-07T13:27:45,312 INFO [RS:0;c7c455b68129:34335-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 977deab04a51d2e8e101e5c7f7816b2b#info#compaction#80 average throughput is 33.52 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-07T13:27:45,313 DEBUG [RS:0;c7c455b68129:34335-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b/.tmp/info/b17c85c32c044aa09b79dc90ce01208b is 1080, key is row0062/info:/1733578048974/Put/seqid=0 2024-12-07T13:27:45,317 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32921 is added to blk_1073741866_1042 (size=111353) 2024-12-07T13:27:45,317 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34771 is added to blk_1073741866_1042 (size=111353) 2024-12-07T13:27:45,323 DEBUG [RS:0;c7c455b68129:34335-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b/.tmp/info/b17c85c32c044aa09b79dc90ce01208b as hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b/info/b17c85c32c044aa09b79dc90ce01208b 2024-12-07T13:27:45,329 INFO [RS:0;c7c455b68129:34335-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 977deab04a51d2e8e101e5c7f7816b2b/info of 977deab04a51d2e8e101e5c7f7816b2b into b17c85c32c044aa09b79dc90ce01208b(size=108.7 K), total size for store is 108.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-07T13:27:45,329 DEBUG [RS:0;c7c455b68129:34335-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 977deab04a51d2e8e101e5c7f7816b2b: 2024-12-07T13:27:45,329 INFO [RS:0;c7c455b68129:34335-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1733578051096.977deab04a51d2e8e101e5c7f7816b2b., storeName=977deab04a51d2e8e101e5c7f7816b2b/info, priority=13, startTime=1733578065299; duration=0sec 2024-12-07T13:27:45,329 DEBUG [RS:0;c7c455b68129:34335-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T13:27:45,329 DEBUG [RS:0;c7c455b68129:34335-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 977deab04a51d2e8e101e5c7f7816b2b:info 2024-12-07T13:27:45,387 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-07T13:27:45,634 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,42863,1733577894737/c7c455b68129%2C42863%2C1733577894737.1733577894972 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:27:45,634 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,43699,1733577893481/c7c455b68129%2C43699%2C1733577893481.meta.1733577894498.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:27:46,635 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,43699,1733577893481/c7c455b68129%2C43699%2C1733577893481.meta.1733577894498.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:27:46,635 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,42863,1733577894737/c7c455b68129%2C42863%2C1733577894737.1733577894972 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:27:47,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34335 {}] regionserver.HRegion(8855): Flush requested on 977deab04a51d2e8e101e5c7f7816b2b 2024-12-07T13:27:47,309 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 977deab04a51d2e8e101e5c7f7816b2b 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-07T13:27:47,313 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b/.tmp/info/e2851ec985fc4680a49a68b8a00bde6f is 1080, key is row0160/info:/1733578065279/Put/seqid=0 2024-12-07T13:27:47,318 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34771 is added to blk_1073741867_1043 (size=12516) 2024-12-07T13:27:47,318 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32921 is added to blk_1073741867_1043 (size=12516) 2024-12-07T13:27:47,319 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=225 (bloomFilter=true), to=hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b/.tmp/info/e2851ec985fc4680a49a68b8a00bde6f 2024-12-07T13:27:47,325 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b/.tmp/info/e2851ec985fc4680a49a68b8a00bde6f as 
hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b/info/e2851ec985fc4680a49a68b8a00bde6f 2024-12-07T13:27:47,332 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b/info/e2851ec985fc4680a49a68b8a00bde6f, entries=7, sequenceid=225, filesize=12.2 K 2024-12-07T13:27:47,333 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=11.56 KB/11836 for 977deab04a51d2e8e101e5c7f7816b2b in 24ms, sequenceid=225, compaction requested=false 2024-12-07T13:27:47,333 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 977deab04a51d2e8e101e5c7f7816b2b: 2024-12-07T13:27:47,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34335 {}] regionserver.HRegion(8855): Flush requested on 977deab04a51d2e8e101e5c7f7816b2b 2024-12-07T13:27:47,333 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 977deab04a51d2e8e101e5c7f7816b2b 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB 2024-12-07T13:27:47,337 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b/.tmp/info/9e5a9ba49bc64b6894e4813538bd0038 is 1080, key is row0167/info:/1733578067310/Put/seqid=0 2024-12-07T13:27:47,397 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34771 is added to blk_1073741868_1044 (size=17906) 2024-12-07T13:27:47,398 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32921 is added to blk_1073741868_1044 (size=17906) 2024-12-07T13:27:47,399 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=240 (bloomFilter=true), to=hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b/.tmp/info/9e5a9ba49bc64b6894e4813538bd0038 2024-12-07T13:27:47,406 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b/.tmp/info/9e5a9ba49bc64b6894e4813538bd0038 as hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b/info/9e5a9ba49bc64b6894e4813538bd0038 2024-12-07T13:27:47,411 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b/info/9e5a9ba49bc64b6894e4813538bd0038, entries=12, sequenceid=240, filesize=17.5 K 2024-12-07T13:27:47,412 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=10.51 KB/10760 for 977deab04a51d2e8e101e5c7f7816b2b in 79ms, sequenceid=240, compaction requested=true 2024-12-07T13:27:47,412 DEBUG [MemStoreFlusher.0 {}] 
regionserver.HRegion(2603): Flush status journal for 977deab04a51d2e8e101e5c7f7816b2b: 2024-12-07T13:27:47,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34335 {}] regionserver.HRegion(8855): Flush requested on 977deab04a51d2e8e101e5c7f7816b2b 2024-12-07T13:27:47,413 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 977deab04a51d2e8e101e5c7f7816b2b:info, priority=-2147483648, current under compaction store size is 1 2024-12-07T13:27:47,413 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T13:27:47,413 DEBUG [RS:0;c7c455b68129:34335-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-07T13:27:47,413 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 977deab04a51d2e8e101e5c7f7816b2b 1/1 column families, dataSize=11.56 KB heapSize=12.63 KB 2024-12-07T13:27:47,414 DEBUG [RS:0;c7c455b68129:34335-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 141775 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-07T13:27:47,414 DEBUG [RS:0;c7c455b68129:34335-shortCompactions-0 {}] regionserver.HStore(1541): 977deab04a51d2e8e101e5c7f7816b2b/info is initiating minor compaction (all files) 2024-12-07T13:27:47,414 INFO [RS:0;c7c455b68129:34335-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 977deab04a51d2e8e101e5c7f7816b2b/info in TestLogRolling-testLogRolling,row0062,1733578051096.977deab04a51d2e8e101e5c7f7816b2b. 2024-12-07T13:27:47,414 INFO [RS:0;c7c455b68129:34335-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b/info/b17c85c32c044aa09b79dc90ce01208b, hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b/info/e2851ec985fc4680a49a68b8a00bde6f, hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b/info/9e5a9ba49bc64b6894e4813538bd0038] into tmpdir=hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b/.tmp, totalSize=138.5 K 2024-12-07T13:27:47,414 DEBUG [RS:0;c7c455b68129:34335-shortCompactions-0 {}] compactions.Compactor(225): Compacting b17c85c32c044aa09b79dc90ce01208b, keycount=98, bloomtype=ROW, size=108.7 K, encoding=NONE, compression=NONE, seqNum=213, earliestPutTs=1733578048974 2024-12-07T13:27:47,415 DEBUG [RS:0;c7c455b68129:34335-shortCompactions-0 {}] compactions.Compactor(225): Compacting e2851ec985fc4680a49a68b8a00bde6f, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=225, earliestPutTs=1733578065279 2024-12-07T13:27:47,415 DEBUG [RS:0;c7c455b68129:34335-shortCompactions-0 {}] compactions.Compactor(225): Compacting 9e5a9ba49bc64b6894e4813538bd0038, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=240, earliestPutTs=1733578067310 2024-12-07T13:27:47,417 DEBUG [MemStoreFlusher.0 {}] 
hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b/.tmp/info/b871865e434545138b66a4fb6a26b7cc is 1080, key is row0179/info:/1733578067334/Put/seqid=0 2024-12-07T13:27:47,421 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32921 is added to blk_1073741869_1045 (size=16828) 2024-12-07T13:27:47,422 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34771 is added to blk_1073741869_1045 (size=16828) 2024-12-07T13:27:47,422 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.56 KB at sequenceid=254 (bloomFilter=true), to=hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b/.tmp/info/b871865e434545138b66a4fb6a26b7cc 2024-12-07T13:27:47,427 INFO [RS:0;c7c455b68129:34335-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 977deab04a51d2e8e101e5c7f7816b2b#info#compaction#84 average throughput is 60.03 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-07T13:27:47,427 DEBUG [RS:0;c7c455b68129:34335-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b/.tmp/info/362f5394705b4a429c64ebce8b0df0ac is 1080, key is row0062/info:/1733578048974/Put/seqid=0 2024-12-07T13:27:47,428 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b/.tmp/info/b871865e434545138b66a4fb6a26b7cc as hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b/info/b871865e434545138b66a4fb6a26b7cc 2024-12-07T13:27:47,434 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b/info/b871865e434545138b66a4fb6a26b7cc, entries=11, sequenceid=254, filesize=16.4 K 2024-12-07T13:27:47,435 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34771 is added to blk_1073741870_1046 (size=132069) 2024-12-07T13:27:47,435 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~11.56 KB/11836, heapSize ~12.61 KB/12912, currentSize=3.15 KB/3228 for 977deab04a51d2e8e101e5c7f7816b2b in 21ms, sequenceid=254, compaction requested=false 2024-12-07T13:27:47,435 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 977deab04a51d2e8e101e5c7f7816b2b: 2024-12-07T13:27:47,435 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32921 is added to blk_1073741870_1046 (size=132069) 2024-12-07T13:27:47,439 DEBUG [RS:0;c7c455b68129:34335-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b/.tmp/info/362f5394705b4a429c64ebce8b0df0ac as hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b/info/362f5394705b4a429c64ebce8b0df0ac 2024-12-07T13:27:47,444 INFO [RS:0;c7c455b68129:34335-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 977deab04a51d2e8e101e5c7f7816b2b/info of 977deab04a51d2e8e101e5c7f7816b2b into 362f5394705b4a429c64ebce8b0df0ac(size=129.0 K), total size for store is 145.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-07T13:27:47,444 DEBUG [RS:0;c7c455b68129:34335-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 977deab04a51d2e8e101e5c7f7816b2b: 2024-12-07T13:27:47,444 INFO [RS:0;c7c455b68129:34335-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1733578051096.977deab04a51d2e8e101e5c7f7816b2b., storeName=977deab04a51d2e8e101e5c7f7816b2b/info, priority=13, startTime=1733578067412; duration=0sec 2024-12-07T13:27:47,444 DEBUG [RS:0;c7c455b68129:34335-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T13:27:47,444 DEBUG [RS:0;c7c455b68129:34335-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 977deab04a51d2e8e101e5c7f7816b2b:info 2024-12-07T13:27:47,636 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,43699,1733577893481/c7c455b68129%2C43699%2C1733577893481.meta.1733577894498.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:27:47,636 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,42863,1733577894737/c7c455b68129%2C42863%2C1733577894737.1733577894972 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:27:48,637 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,42863,1733577894737/c7c455b68129%2C42863%2C1733577894737.1733577894972 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:27:48,637 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,43699,1733577893481/c7c455b68129%2C43699%2C1733577893481.meta.1733577894498.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:27:49,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34335 {}] regionserver.HRegion(8855): Flush requested on 977deab04a51d2e8e101e5c7f7816b2b 2024-12-07T13:27:49,436 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 977deab04a51d2e8e101e5c7f7816b2b 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-07T13:27:49,441 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b/.tmp/info/08fdf19cfdc84f4d916196fdcebc7f3b is 1080, key is row0190/info:/1733578067414/Put/seqid=0 2024-12-07T13:27:49,450 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34771 is added to blk_1073741871_1047 (size=12523) 2024-12-07T13:27:49,450 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32921 is added to blk_1073741871_1047 (size=12523) 2024-12-07T13:27:49,451 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=265 (bloomFilter=true), to=hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b/.tmp/info/08fdf19cfdc84f4d916196fdcebc7f3b 2024-12-07T13:27:49,456 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b/.tmp/info/08fdf19cfdc84f4d916196fdcebc7f3b as hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b/info/08fdf19cfdc84f4d916196fdcebc7f3b 2024-12-07T13:27:49,461 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b/info/08fdf19cfdc84f4d916196fdcebc7f3b, entries=7, sequenceid=265, filesize=12.2 K 2024-12-07T13:27:49,462 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=12.61 KB/12912 for 977deab04a51d2e8e101e5c7f7816b2b in 27ms, sequenceid=265, compaction requested=true 2024-12-07T13:27:49,462 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 977deab04a51d2e8e101e5c7f7816b2b: 2024-12-07T13:27:49,462 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 977deab04a51d2e8e101e5c7f7816b2b:info, priority=-2147483648, current under compaction store size is 1 2024-12-07T13:27:49,462 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T13:27:49,462 DEBUG [RS:0;c7c455b68129:34335-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-07T13:27:49,463 DEBUG [RS:0;c7c455b68129:34335-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 161420 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-07T13:27:49,463 DEBUG [RS:0;c7c455b68129:34335-shortCompactions-0 {}] regionserver.HStore(1541): 977deab04a51d2e8e101e5c7f7816b2b/info is initiating minor compaction (all files) 2024-12-07T13:27:49,463 INFO [RS:0;c7c455b68129:34335-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 977deab04a51d2e8e101e5c7f7816b2b/info in TestLogRolling-testLogRolling,row0062,1733578051096.977deab04a51d2e8e101e5c7f7816b2b. 2024-12-07T13:27:49,463 INFO [RS:0;c7c455b68129:34335-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b/info/362f5394705b4a429c64ebce8b0df0ac, hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b/info/b871865e434545138b66a4fb6a26b7cc, hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b/info/08fdf19cfdc84f4d916196fdcebc7f3b] into tmpdir=hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b/.tmp, totalSize=157.6 K 2024-12-07T13:27:49,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34335 {}] regionserver.HRegion(8855): Flush requested on 977deab04a51d2e8e101e5c7f7816b2b 2024-12-07T13:27:49,464 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 977deab04a51d2e8e101e5c7f7816b2b 1/1 column families, dataSize=14.71 KB heapSize=16 KB 2024-12-07T13:27:49,464 DEBUG [RS:0;c7c455b68129:34335-shortCompactions-0 {}] compactions.Compactor(225): Compacting 362f5394705b4a429c64ebce8b0df0ac, keycount=117, bloomtype=ROW, size=129.0 K, encoding=NONE, compression=NONE, seqNum=240, earliestPutTs=1733578048974 2024-12-07T13:27:49,464 DEBUG [RS:0;c7c455b68129:34335-shortCompactions-0 {}] compactions.Compactor(225): Compacting b871865e434545138b66a4fb6a26b7cc, keycount=11, bloomtype=ROW, size=16.4 K, encoding=NONE, compression=NONE, seqNum=254, earliestPutTs=1733578067334 2024-12-07T13:27:49,464 DEBUG [RS:0;c7c455b68129:34335-shortCompactions-0 {}] compactions.Compactor(225): Compacting 08fdf19cfdc84f4d916196fdcebc7f3b, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=265, earliestPutTs=1733578067414 2024-12-07T13:27:49,467 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b/.tmp/info/c6b0bf0cc67e4c7bacd4e980a3fa4081 is 1080, key is row0197/info:/1733578069437/Put/seqid=0 2024-12-07T13:27:49,472 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32921 is added to 
blk_1073741872_1048 (size=20092) 2024-12-07T13:27:49,472 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34771 is added to blk_1073741872_1048 (size=20092) 2024-12-07T13:27:49,473 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=14.71 KB at sequenceid=282 (bloomFilter=true), to=hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b/.tmp/info/c6b0bf0cc67e4c7bacd4e980a3fa4081 2024-12-07T13:27:49,475 INFO [RS:0;c7c455b68129:34335-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 977deab04a51d2e8e101e5c7f7816b2b#info#compaction#87 average throughput is 69.27 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-07T13:27:49,476 DEBUG [RS:0;c7c455b68129:34335-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b/.tmp/info/98052c7bad5047689e02dbf19f126cf1 is 1080, key is row0062/info:/1733578048974/Put/seqid=0 2024-12-07T13:27:49,478 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34771 is added to blk_1073741873_1049 (size=151655) 2024-12-07T13:27:49,478 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32921 is added to blk_1073741873_1049 (size=151655) 2024-12-07T13:27:49,479 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b/.tmp/info/c6b0bf0cc67e4c7bacd4e980a3fa4081 as hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b/info/c6b0bf0cc67e4c7bacd4e980a3fa4081 2024-12-07T13:27:49,483 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b/info/c6b0bf0cc67e4c7bacd4e980a3fa4081, entries=14, sequenceid=282, filesize=19.6 K 2024-12-07T13:27:49,484 DEBUG [RS:0;c7c455b68129:34335-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b/.tmp/info/98052c7bad5047689e02dbf19f126cf1 as hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b/info/98052c7bad5047689e02dbf19f126cf1 2024-12-07T13:27:49,484 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~14.71 KB/15064, heapSize ~15.98 KB/16368, currentSize=10.51 KB/10760 for 977deab04a51d2e8e101e5c7f7816b2b in 20ms, sequenceid=282, compaction requested=false 2024-12-07T13:27:49,484 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 977deab04a51d2e8e101e5c7f7816b2b: 2024-12-07T13:27:49,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34335 {}] regionserver.HRegion(8855): 
Flush requested on 977deab04a51d2e8e101e5c7f7816b2b 2024-12-07T13:27:49,485 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 977deab04a51d2e8e101e5c7f7816b2b 1/1 column families, dataSize=11.56 KB heapSize=12.63 KB 2024-12-07T13:27:49,490 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b/.tmp/info/6b6a5f23730a47148ae237a6a88d2bf4 is 1080, key is row0211/info:/1733578069465/Put/seqid=0 2024-12-07T13:27:49,490 INFO [RS:0;c7c455b68129:34335-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 977deab04a51d2e8e101e5c7f7816b2b/info of 977deab04a51d2e8e101e5c7f7816b2b into 98052c7bad5047689e02dbf19f126cf1(size=148.1 K), total size for store is 167.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-07T13:27:49,490 DEBUG [RS:0;c7c455b68129:34335-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 977deab04a51d2e8e101e5c7f7816b2b: 2024-12-07T13:27:49,490 INFO [RS:0;c7c455b68129:34335-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1733578051096.977deab04a51d2e8e101e5c7f7816b2b., storeName=977deab04a51d2e8e101e5c7f7816b2b/info, priority=13, startTime=1733578069462; duration=0sec 2024-12-07T13:27:49,491 DEBUG [RS:0;c7c455b68129:34335-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T13:27:49,491 DEBUG [RS:0;c7c455b68129:34335-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 977deab04a51d2e8e101e5c7f7816b2b:info 2024-12-07T13:27:49,497 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32921 is added to blk_1073741874_1050 (size=16839) 2024-12-07T13:27:49,497 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34771 is added to blk_1073741874_1050 (size=16839) 2024-12-07T13:27:49,498 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.56 KB at sequenceid=296 (bloomFilter=true), to=hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b/.tmp/info/6b6a5f23730a47148ae237a6a88d2bf4 2024-12-07T13:27:49,503 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b/.tmp/info/6b6a5f23730a47148ae237a6a88d2bf4 as hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b/info/6b6a5f23730a47148ae237a6a88d2bf4 2024-12-07T13:27:49,507 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b/info/6b6a5f23730a47148ae237a6a88d2bf4, entries=11, sequenceid=296, filesize=16.4 K 2024-12-07T13:27:49,508 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush 
of dataSize ~11.56 KB/11836, heapSize ~12.61 KB/12912, currentSize=3.15 KB/3228 for 977deab04a51d2e8e101e5c7f7816b2b in 23ms, sequenceid=296, compaction requested=true 2024-12-07T13:27:49,508 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 977deab04a51d2e8e101e5c7f7816b2b: 2024-12-07T13:27:49,508 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 977deab04a51d2e8e101e5c7f7816b2b:info, priority=-2147483648, current under compaction store size is 1 2024-12-07T13:27:49,508 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T13:27:49,508 DEBUG [RS:0;c7c455b68129:34335-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-07T13:27:49,509 DEBUG [RS:0;c7c455b68129:34335-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 188586 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-07T13:27:49,509 DEBUG [RS:0;c7c455b68129:34335-shortCompactions-0 {}] regionserver.HStore(1541): 977deab04a51d2e8e101e5c7f7816b2b/info is initiating minor compaction (all files) 2024-12-07T13:27:49,509 INFO [RS:0;c7c455b68129:34335-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 977deab04a51d2e8e101e5c7f7816b2b/info in TestLogRolling-testLogRolling,row0062,1733578051096.977deab04a51d2e8e101e5c7f7816b2b. 2024-12-07T13:27:49,510 INFO [RS:0;c7c455b68129:34335-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b/info/98052c7bad5047689e02dbf19f126cf1, hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b/info/c6b0bf0cc67e4c7bacd4e980a3fa4081, hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b/info/6b6a5f23730a47148ae237a6a88d2bf4] into tmpdir=hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b/.tmp, totalSize=184.2 K 2024-12-07T13:27:49,510 DEBUG [RS:0;c7c455b68129:34335-shortCompactions-0 {}] compactions.Compactor(225): Compacting 98052c7bad5047689e02dbf19f126cf1, keycount=135, bloomtype=ROW, size=148.1 K, encoding=NONE, compression=NONE, seqNum=265, earliestPutTs=1733578048974 2024-12-07T13:27:49,510 DEBUG [RS:0;c7c455b68129:34335-shortCompactions-0 {}] compactions.Compactor(225): Compacting c6b0bf0cc67e4c7bacd4e980a3fa4081, keycount=14, bloomtype=ROW, size=19.6 K, encoding=NONE, compression=NONE, seqNum=282, earliestPutTs=1733578069437 2024-12-07T13:27:49,511 DEBUG [RS:0;c7c455b68129:34335-shortCompactions-0 {}] compactions.Compactor(225): Compacting 6b6a5f23730a47148ae237a6a88d2bf4, keycount=11, bloomtype=ROW, size=16.4 K, encoding=NONE, compression=NONE, seqNum=296, earliestPutTs=1733578069465 2024-12-07T13:27:49,523 INFO [RS:0;c7c455b68129:34335-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 977deab04a51d2e8e101e5c7f7816b2b#info#compaction#89 average throughput is 
54.73 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-07T13:27:49,523 DEBUG [RS:0;c7c455b68129:34335-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b/.tmp/info/a13e09086e8b4f9e972e8a5f90b8bf05 is 1080, key is row0062/info:/1733578048974/Put/seqid=0 2024-12-07T13:27:49,526 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32921 is added to blk_1073741875_1051 (size=178740) 2024-12-07T13:27:49,526 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34771 is added to blk_1073741875_1051 (size=178740) 2024-12-07T13:27:49,532 DEBUG [RS:0;c7c455b68129:34335-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b/.tmp/info/a13e09086e8b4f9e972e8a5f90b8bf05 as hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b/info/a13e09086e8b4f9e972e8a5f90b8bf05 2024-12-07T13:27:49,537 INFO [RS:0;c7c455b68129:34335-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 977deab04a51d2e8e101e5c7f7816b2b/info of 977deab04a51d2e8e101e5c7f7816b2b into a13e09086e8b4f9e972e8a5f90b8bf05(size=174.6 K), total size for store is 174.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-07T13:27:49,537 DEBUG [RS:0;c7c455b68129:34335-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 977deab04a51d2e8e101e5c7f7816b2b: 2024-12-07T13:27:49,537 INFO [RS:0;c7c455b68129:34335-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1733578051096.977deab04a51d2e8e101e5c7f7816b2b., storeName=977deab04a51d2e8e101e5c7f7816b2b/info, priority=13, startTime=1733578069508; duration=0sec 2024-12-07T13:27:49,537 DEBUG [RS:0;c7c455b68129:34335-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T13:27:49,537 DEBUG [RS:0;c7c455b68129:34335-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 977deab04a51d2e8e101e5c7f7816b2b:info 2024-12-07T13:27:49,638 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,42863,1733577894737/c7c455b68129%2C42863%2C1733577894737.1733577894972 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:27:49,638 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,43699,1733577893481/c7c455b68129%2C43699%2C1733577893481.meta.1733577894498.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:27:50,638 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,43699,1733577893481/c7c455b68129%2C43699%2C1733577893481.meta.1733577894498.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:27:50,638 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,42863,1733577894737/c7c455b68129%2C42863%2C1733577894737.1733577894972 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:27:51,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34335 {}] regionserver.HRegion(8855): Flush requested on 977deab04a51d2e8e101e5c7f7816b2b 2024-12-07T13:27:51,501 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 977deab04a51d2e8e101e5c7f7816b2b 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-07T13:27:51,507 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b/.tmp/info/ddaaabb151e14ff5b872633e182dcd25 is 1080, key is row0222/info:/1733578069486/Put/seqid=0 2024-12-07T13:27:51,512 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34771 is added to blk_1073741876_1052 (size=12523) 2024-12-07T13:27:51,512 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32921 is added to blk_1073741876_1052 (size=12523) 2024-12-07T13:27:51,513 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=308 (bloomFilter=true), to=hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b/.tmp/info/ddaaabb151e14ff5b872633e182dcd25 2024-12-07T13:27:51,519 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b/.tmp/info/ddaaabb151e14ff5b872633e182dcd25 as 
hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b/info/ddaaabb151e14ff5b872633e182dcd25 2024-12-07T13:27:51,525 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b/info/ddaaabb151e14ff5b872633e182dcd25, entries=7, sequenceid=308, filesize=12.2 K 2024-12-07T13:27:51,526 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=11.56 KB/11836 for 977deab04a51d2e8e101e5c7f7816b2b in 25ms, sequenceid=308, compaction requested=false 2024-12-07T13:27:51,527 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 977deab04a51d2e8e101e5c7f7816b2b: 2024-12-07T13:27:51,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34335 {}] regionserver.HRegion(8855): Flush requested on 977deab04a51d2e8e101e5c7f7816b2b 2024-12-07T13:27:51,528 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 977deab04a51d2e8e101e5c7f7816b2b 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB 2024-12-07T13:27:51,531 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b/.tmp/info/58f00c2dd663471e8557c17a4ac637c6 is 1080, key is row0229/info:/1733578071503/Put/seqid=0 2024-12-07T13:27:51,538 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32921 is added to blk_1073741877_1053 (size=17918) 2024-12-07T13:27:51,538 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34771 is added to blk_1073741877_1053 (size=17918) 2024-12-07T13:27:51,538 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=323 (bloomFilter=true), to=hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b/.tmp/info/58f00c2dd663471e8557c17a4ac637c6 2024-12-07T13:27:51,543 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b/.tmp/info/58f00c2dd663471e8557c17a4ac637c6 as hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b/info/58f00c2dd663471e8557c17a4ac637c6 2024-12-07T13:27:51,548 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b/info/58f00c2dd663471e8557c17a4ac637c6, entries=12, sequenceid=323, filesize=17.5 K 2024-12-07T13:27:51,549 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=11.56 KB/11836 for 977deab04a51d2e8e101e5c7f7816b2b in 21ms, sequenceid=323, compaction requested=true 2024-12-07T13:27:51,549 DEBUG [MemStoreFlusher.0 {}] 
regionserver.HRegion(2603): Flush status journal for 977deab04a51d2e8e101e5c7f7816b2b: 2024-12-07T13:27:51,549 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 977deab04a51d2e8e101e5c7f7816b2b:info, priority=-2147483648, current under compaction store size is 1 2024-12-07T13:27:51,550 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T13:27:51,550 DEBUG [RS:0;c7c455b68129:34335-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-07T13:27:51,550 DEBUG [RS:0;c7c455b68129:34335-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 209181 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-07T13:27:51,551 DEBUG [RS:0;c7c455b68129:34335-shortCompactions-0 {}] regionserver.HStore(1541): 977deab04a51d2e8e101e5c7f7816b2b/info is initiating minor compaction (all files) 2024-12-07T13:27:51,551 INFO [RS:0;c7c455b68129:34335-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 977deab04a51d2e8e101e5c7f7816b2b/info in TestLogRolling-testLogRolling,row0062,1733578051096.977deab04a51d2e8e101e5c7f7816b2b. 2024-12-07T13:27:51,551 INFO [RS:0;c7c455b68129:34335-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b/info/a13e09086e8b4f9e972e8a5f90b8bf05, hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b/info/ddaaabb151e14ff5b872633e182dcd25, hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b/info/58f00c2dd663471e8557c17a4ac637c6] into tmpdir=hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b/.tmp, totalSize=204.3 K 2024-12-07T13:27:51,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34335 {}] regionserver.HRegion(8855): Flush requested on 977deab04a51d2e8e101e5c7f7816b2b 2024-12-07T13:27:51,551 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 977deab04a51d2e8e101e5c7f7816b2b 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB 2024-12-07T13:27:51,551 DEBUG [RS:0;c7c455b68129:34335-shortCompactions-0 {}] compactions.Compactor(225): Compacting a13e09086e8b4f9e972e8a5f90b8bf05, keycount=160, bloomtype=ROW, size=174.6 K, encoding=NONE, compression=NONE, seqNum=296, earliestPutTs=1733578048974 2024-12-07T13:27:51,552 DEBUG [RS:0;c7c455b68129:34335-shortCompactions-0 {}] compactions.Compactor(225): Compacting ddaaabb151e14ff5b872633e182dcd25, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=308, earliestPutTs=1733578069486 2024-12-07T13:27:51,552 DEBUG [RS:0;c7c455b68129:34335-shortCompactions-0 {}] compactions.Compactor(225): Compacting 58f00c2dd663471e8557c17a4ac637c6, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=323, earliestPutTs=1733578071503 2024-12-07T13:27:51,555 DEBUG [MemStoreFlusher.0 {}] 
hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b/.tmp/info/f6250b120d3449049d89ae1b2b01a039 is 1080, key is row0241/info:/1733578071529/Put/seqid=0 2024-12-07T13:27:51,560 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34771 is added to blk_1073741878_1054 (size=17918) 2024-12-07T13:27:51,561 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32921 is added to blk_1073741878_1054 (size=17918) 2024-12-07T13:27:51,561 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=338 (bloomFilter=true), to=hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b/.tmp/info/f6250b120d3449049d89ae1b2b01a039 2024-12-07T13:27:51,565 INFO [RS:0;c7c455b68129:34335-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 977deab04a51d2e8e101e5c7f7816b2b#info#compaction#93 average throughput is 61.23 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-07T13:27:51,566 DEBUG [RS:0;c7c455b68129:34335-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b/.tmp/info/5c83f7026ec0434bb5b1a314e0f76695 is 1080, key is row0062/info:/1733578048974/Put/seqid=0 2024-12-07T13:27:51,566 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b/.tmp/info/f6250b120d3449049d89ae1b2b01a039 as hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b/info/f6250b120d3449049d89ae1b2b01a039 2024-12-07T13:27:51,570 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32921 is added to blk_1073741879_1055 (size=199347) 2024-12-07T13:27:51,571 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34771 is added to blk_1073741879_1055 (size=199347) 2024-12-07T13:27:51,571 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b/info/f6250b120d3449049d89ae1b2b01a039, entries=12, sequenceid=338, filesize=17.5 K 2024-12-07T13:27:51,572 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=4.20 KB/4304 for 977deab04a51d2e8e101e5c7f7816b2b in 21ms, sequenceid=338, compaction requested=false 2024-12-07T13:27:51,572 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 977deab04a51d2e8e101e5c7f7816b2b: 2024-12-07T13:27:51,576 DEBUG [RS:0;c7c455b68129:34335-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b/.tmp/info/5c83f7026ec0434bb5b1a314e0f76695 as hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b/info/5c83f7026ec0434bb5b1a314e0f76695 2024-12-07T13:27:51,582 INFO [RS:0;c7c455b68129:34335-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 977deab04a51d2e8e101e5c7f7816b2b/info of 977deab04a51d2e8e101e5c7f7816b2b into 5c83f7026ec0434bb5b1a314e0f76695(size=194.7 K), total size for store is 212.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-07T13:27:51,582 DEBUG [RS:0;c7c455b68129:34335-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 977deab04a51d2e8e101e5c7f7816b2b: 2024-12-07T13:27:51,582 INFO [RS:0;c7c455b68129:34335-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1733578051096.977deab04a51d2e8e101e5c7f7816b2b., storeName=977deab04a51d2e8e101e5c7f7816b2b/info, priority=13, startTime=1733578071549; duration=0sec 2024-12-07T13:27:51,582 DEBUG [RS:0;c7c455b68129:34335-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-07T13:27:51,582 DEBUG [RS:0;c7c455b68129:34335-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 977deab04a51d2e8e101e5c7f7816b2b:info 2024-12-07T13:27:51,639 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,43699,1733577893481/c7c455b68129%2C43699%2C1733577893481.meta.1733577894498.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:27:51,639 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,42863,1733577894737/c7c455b68129%2C42863%2C1733577894737.1733577894972 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:27:52,640 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,42863,1733577894737/c7c455b68129%2C42863%2C1733577894737.1733577894972 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:27:52,640 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,43699,1733577893481/c7c455b68129%2C43699%2C1733577893481.meta.1733577894498.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:27:53,560 INFO [Time-limited test {}] wal.AbstractTestLogRolling(285): after writing there are 0 log files 2024-12-07T13:27:53,560 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor c7c455b68129%2C34335%2C1733578035593.1733578073560 2024-12-07T13:27:53,568 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:27:53,568 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:27:53,568 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:27:53,568 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:27:53,568 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:27:53,568 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/WALs/c7c455b68129,34335,1733578035593/c7c455b68129%2C34335%2C1733578035593.1733578036106 with entries=320, filesize=311.00 KB; new WAL /user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/WALs/c7c455b68129,34335,1733578035593/c7c455b68129%2C34335%2C1733578035593.1733578073560 2024-12-07T13:27:53,570 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39113:39113),(127.0.0.1/127.0.0.1:39449:39449)] 2024-12-07T13:27:53,570 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/WALs/c7c455b68129,34335,1733578035593/c7c455b68129%2C34335%2C1733578035593.1733578036106 is not closed yet, will try archiving it next time 2024-12-07T13:27:53,571 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34771 is added to blk_1073741833_1009 (size=318475) 2024-12-07T13:27:53,571 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32921 is added to blk_1073741833_1009 (size=318475) 2024-12-07T13:27:53,577 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 977deab04a51d2e8e101e5c7f7816b2b 1/1 column families, dataSize=4.20 KB heapSize=4.75 KB 2024-12-07T13:27:53,582 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b/.tmp/info/d6f2410b79f146efa175a1bfa049f2cb is 1080, key is row0253/info:/1733578071552/Put/seqid=0 2024-12-07T13:27:53,586 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34771 is added to blk_1073741881_1057 (size=9278) 2024-12-07T13:27:53,586 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32921 is added to blk_1073741881_1057 (size=9278) 2024-12-07T13:27:53,586 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.20 KB at sequenceid=346 (bloomFilter=true), 
to=hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b/.tmp/info/d6f2410b79f146efa175a1bfa049f2cb 2024-12-07T13:27:53,591 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b/.tmp/info/d6f2410b79f146efa175a1bfa049f2cb as hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b/info/d6f2410b79f146efa175a1bfa049f2cb 2024-12-07T13:27:53,598 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b/info/d6f2410b79f146efa175a1bfa049f2cb, entries=4, sequenceid=346, filesize=9.1 K 2024-12-07T13:27:53,599 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~4.20 KB/4304, heapSize ~4.73 KB/4848, currentSize=0 B/0 for 977deab04a51d2e8e101e5c7f7816b2b in 22ms, sequenceid=346, compaction requested=true 2024-12-07T13:27:53,599 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 977deab04a51d2e8e101e5c7f7816b2b: 2024-12-07T13:27:53,599 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=670 B heapSize=2.02 KB 2024-12-07T13:27:53,603 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/hbase/meta/1588230740/.tmp/info/d09302dd4d78431b82c785d1fc3181af is 186, key is TestLogRolling-testLogRolling,,1733578051096.85a851f2688c52b461cdf08f415e962c./info:regioninfo/1733578051899/Put/seqid=0 2024-12-07T13:27:53,607 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34771 is added to blk_1073741882_1058 (size=6153) 2024-12-07T13:27:53,608 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32921 is added to blk_1073741882_1058 (size=6153) 2024-12-07T13:27:53,608 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=670 B at sequenceid=21 (bloomFilter=true), to=hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/hbase/meta/1588230740/.tmp/info/d09302dd4d78431b82c785d1fc3181af 2024-12-07T13:27:53,613 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/hbase/meta/1588230740/.tmp/info/d09302dd4d78431b82c785d1fc3181af as hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/hbase/meta/1588230740/info/d09302dd4d78431b82c785d1fc3181af 2024-12-07T13:27:53,617 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/hbase/meta/1588230740/info/d09302dd4d78431b82c785d1fc3181af, entries=5, sequenceid=21, filesize=6.0 K 2024-12-07T13:27:53,618 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~670 B/670, heapSize ~1.25 KB/1280, currentSize=0 B/0 for 1588230740 in 19ms, 
sequenceid=21, compaction requested=false 2024-12-07T13:27:53,618 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-12-07T13:27:53,618 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 85a851f2688c52b461cdf08f415e962c: 2024-12-07T13:27:53,618 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor c7c455b68129%2C34335%2C1733578035593.1733578073618 2024-12-07T13:27:53,625 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:27:53,625 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:27:53,625 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:27:53,625 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:27:53,625 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:27:53,626 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/WALs/c7c455b68129,34335,1733578035593/c7c455b68129%2C34335%2C1733578035593.1733578073560 with entries=2, filesize=723 B; new WAL /user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/WALs/c7c455b68129,34335,1733578035593/c7c455b68129%2C34335%2C1733578035593.1733578073618 2024-12-07T13:27:53,627 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32921 is added to blk_1073741880_1056 (size=731) 2024-12-07T13:27:53,627 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34771 is added to blk_1073741880_1056 (size=731) 2024-12-07T13:27:53,630 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/WALs/c7c455b68129,34335,1733578035593/c7c455b68129%2C34335%2C1733578035593.1733578036106 to hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/oldWALs/c7c455b68129%2C34335%2C1733578035593.1733578036106 2024-12-07T13:27:53,631 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/WALs/c7c455b68129,34335,1733578035593/c7c455b68129%2C34335%2C1733578035593.1733578073560 to hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/oldWALs/c7c455b68129%2C34335%2C1733578035593.1733578073560 2024-12-07T13:27:53,635 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39113:39113),(127.0.0.1/127.0.0.1:39449:39449)] 2024-12-07T13:27:53,636 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [5,000] milli-secs(wait.for.ratio=[1]) 2024-12-07T13:27:53,636 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-12-07T13:27:53,636 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
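The entries above show the pattern the test exercises: force a memstore flush, roll the WAL, and let fully-flushed WAL files be archived to the oldWALs directory. A minimal sketch of driving that same flush-and-roll cycle through the public client API follows; the table name is taken from the log, while the connection setup and the use of Admin.getRegionServers()/rollWALWriter() are assumptions about the client API version, not the test's actual code.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushAndRollSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create(); // assumes hbase-site.xml on the classpath
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("TestLogRolling-testLogRolling"); // name seen in the log
      // Flush memstores so the WAL entries are also persisted as HFiles.
      admin.flush(table);
      // Ask each region server to roll its WAL writer; WALs that hold no
      // unflushed edits then become eligible for archival to oldWALs.
      for (ServerName sn : admin.getRegionServers()) {
        admin.rollWALWriter(sn);
      }
    }
  }
}
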
2024-12-07T13:27:53,636 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-07T13:27:53,636 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T13:27:53,636 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T13:27:53,636 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
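The call stack just logged originates in the test's JUnit @After method (AbstractTestLogRolling.tearDown), which closes the shared connection while shutting the mini cluster down. A hedged sketch of that teardown shape is below; shutdownMiniCluster() appears in the stack trace above, while the field name and the startMiniCluster() setup are illustrative assumptions rather than the test's real wiring.

import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.junit.After;
import org.junit.Before;

public class LogRollingTeardownSketch {
  // Hypothetical utility field; the real test class manages this itself.
  private final HBaseTestingUtil testUtil = new HBaseTestingUtil();

  @Before
  public void setUp() throws Exception {
    testUtil.startMiniCluster(); // assumed setup: in-process HDFS, ZooKeeper and HBase
  }

  @After
  public void tearDown() throws Exception {
    // Mirrors the AbstractTestLogRolling.tearDown frame in the stack above;
    // closing the cluster also closes the shared async connection, which is
    // what produces the "Connection has been closed" DEBUG entries.
    testUtil.shutdownMiniCluster();
  }
}
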
2024-12-07T13:27:53,636 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-07T13:27:53,636 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=914229464, stopped=false 2024-12-07T13:27:53,636 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=c7c455b68129,38977,1733578035403 2024-12-07T13:27:53,641 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,43699,1733577893481/c7c455b68129%2C43699%2C1733577893481.meta.1733577894498.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:27:53,641 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,42863,1733577894737/c7c455b68129%2C42863%2C1733577894737.1733577894972 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:27:53,746 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38977-0x100007597840000, quorum=127.0.0.1:64288, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-07T13:27:53,746 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34335-0x100007597840001, quorum=127.0.0.1:64288, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-07T13:27:53,746 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34335-0x100007597840001, quorum=127.0.0.1:64288, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T13:27:53,746 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38977-0x100007597840000, quorum=127.0.0.1:64288, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T13:27:53,746 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-07T13:27:53,746 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
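The repeated "Failed invocation ... Caused by: java.io.IOException: Filesystem closed" warnings above come from RecoverLeaseFSUtils probing isFileClosed(Path) reflectively, so the underlying IOException surfaces wrapped in an InvocationTargetException; during shutdown the DFSClient behind the old WAL paths is already closed, hence the warning rather than a clean result. The sketch below shows that reflective-probe pattern in isolation; it is not HBase's actual code, only an illustration of why the stack traces look the way they do.

import java.io.IOException;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class IsFileClosedProbe {
  /**
   * Calls isFileClosed(Path) reflectively when the FileSystem implementation
   * provides it (DistributedFileSystem does), returning false when the method
   * is absent. The unwrapping of InvocationTargetException is why the log
   * shows "InvocationTargetException: null ... Caused by: IOException:
   * Filesystem closed".
   */
  static boolean isFileClosed(FileSystem fs, Path path) throws IOException {
    Method m;
    try {
      m = fs.getClass().getMethod("isFileClosed", Path.class);
    } catch (NoSuchMethodException e) {
      return false; // this FileSystem does not support the probe
    }
    try {
      return (Boolean) m.invoke(fs, path);
    } catch (IllegalAccessException e) {
      throw new IOException(e);
    } catch (InvocationTargetException e) {
      Throwable cause = e.getCause();
      if (cause instanceof IOException) {
        throw (IOException) cause; // e.g. "Filesystem closed" during shutdown
      }
      throw new IOException(cause);
    }
  }
}
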
2024-12-07T13:27:53,746 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-07T13:27:53,746 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T13:27:53,747 DEBUG [zk-event-processor-pool-0 {}] 
zookeeper.ZKUtil(113): master:38977-0x100007597840000, quorum=127.0.0.1:64288, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-07T13:27:53,747 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'c7c455b68129,34335,1733578035593' ***** 2024-12-07T13:27:53,747 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:34335-0x100007597840001, quorum=127.0.0.1:64288, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-07T13:27:53,747 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-07T13:27:53,747 INFO [RS:0;c7c455b68129:34335 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-07T13:27:53,747 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-07T13:27:53,748 INFO [RS:0;c7c455b68129:34335 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-07T13:27:53,748 INFO [RS:0;c7c455b68129:34335 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-07T13:27:53,748 INFO [RS:0;c7c455b68129:34335 {}] regionserver.HRegionServer(3091): Received CLOSE for 977deab04a51d2e8e101e5c7f7816b2b 2024-12-07T13:27:53,748 INFO [RS:0;c7c455b68129:34335 {}] regionserver.HRegionServer(3091): Received CLOSE for 85a851f2688c52b461cdf08f415e962c 2024-12-07T13:27:53,748 INFO [RS:0;c7c455b68129:34335 {}] regionserver.HRegionServer(959): stopping server c7c455b68129,34335,1733578035593 2024-12-07T13:27:53,748 INFO [RS:0;c7c455b68129:34335 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-07T13:27:53,748 DEBUG [RS_CLOSE_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 977deab04a51d2e8e101e5c7f7816b2b, disabling compactions & flushes 2024-12-07T13:27:53,748 INFO [RS:0;c7c455b68129:34335 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;c7c455b68129:34335. 2024-12-07T13:27:53,748 INFO [RS_CLOSE_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,row0062,1733578051096.977deab04a51d2e8e101e5c7f7816b2b. 2024-12-07T13:27:53,748 DEBUG [RS_CLOSE_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,row0062,1733578051096.977deab04a51d2e8e101e5c7f7816b2b. 
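The ZooKeeper events above show how shutdown is signalled: deleting /hbase/running produces NodeDeleted on every watcher, and each ZKWatcher then re-arms a watch on the now-absent znode. A small stand-alone sketch of watching a znode that may not currently exist is below, using the plain ZooKeeper client; the quorum string is copied from the log, while the session timeout and latch-based wait are arbitrary illustrative choices.

import java.util.concurrent.CountDownLatch;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

public class RunningNodeWatchSketch {
  public static void main(String[] args) throws Exception {
    CountDownLatch deleted = new CountDownLatch(1);
    // Quorum taken from the log; 30s session timeout is an arbitrary choice.
    ZooKeeper zk = new ZooKeeper("127.0.0.1:64288", 30_000, event -> { });
    Watcher watcher = (WatchedEvent event) -> {
      if (event.getType() == Watcher.Event.EventType.NodeDeleted
          && "/hbase/running".equals(event.getPath())) {
        deleted.countDown(); // cluster shutdown was requested
      }
    };
    // exists() arms a watch even when the znode is currently absent, which is
    // the "Set watcher on znode that does not yet exist" behaviour logged above;
    // the watch fires on the next create or delete of that path.
    zk.exists("/hbase/running", watcher);
    deleted.await();
    zk.close();
  }
}
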
2024-12-07T13:27:53,748 DEBUG [RS:0;c7c455b68129:34335 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-07T13:27:53,748 DEBUG [RS_CLOSE_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,row0062,1733578051096.977deab04a51d2e8e101e5c7f7816b2b. after waiting 0 ms 2024-12-07T13:27:53,748 DEBUG [RS:0;c7c455b68129:34335 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T13:27:53,748 DEBUG [RS_CLOSE_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,row0062,1733578051096.977deab04a51d2e8e101e5c7f7816b2b. 2024-12-07T13:27:53,748 INFO [RS:0;c7c455b68129:34335 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-07T13:27:53,748 INFO [RS:0;c7c455b68129:34335 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-07T13:27:53,748 INFO [RS:0;c7c455b68129:34335 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-12-07T13:27:53,748 INFO [RS:0;c7c455b68129:34335 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-07T13:27:53,749 INFO [RS:0;c7c455b68129:34335 {}] regionserver.HRegionServer(1321): Waiting on 3 regions to close 2024-12-07T13:27:53,749 DEBUG [RS:0;c7c455b68129:34335 {}] regionserver.HRegionServer(1325): Online Regions={977deab04a51d2e8e101e5c7f7816b2b=TestLogRolling-testLogRolling,row0062,1733578051096.977deab04a51d2e8e101e5c7f7816b2b., 1588230740=hbase:meta,,1.1588230740, 85a851f2688c52b461cdf08f415e962c=TestLogRolling-testLogRolling,,1733578051096.85a851f2688c52b461cdf08f415e962c.} 2024-12-07T13:27:53,749 DEBUG [RS:0;c7c455b68129:34335 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 85a851f2688c52b461cdf08f415e962c, 977deab04a51d2e8e101e5c7f7816b2b 2024-12-07T13:27:53,749 DEBUG [RS_CLOSE_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-07T13:27:53,749 INFO [RS_CLOSE_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-07T13:27:53,749 DEBUG [RS_CLOSE_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-07T13:27:53,749 DEBUG [RS_CLOSE_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-07T13:27:53,749 DEBUG [RS_CLOSE_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-07T13:27:53,749 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733578051096.977deab04a51d2e8e101e5c7f7816b2b.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b/info/085f4f8c7f6347d48dc94f1ef2666f12.82e88219ec4ec4a4fff2a6f409a36e45->hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/82e88219ec4ec4a4fff2a6f409a36e45/info/085f4f8c7f6347d48dc94f1ef2666f12-top, hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b/info/TestLogRolling-testLogRolling=82e88219ec4ec4a4fff2a6f409a36e45-8c0a29dee7dc43a1acc0d0438647509e, hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b/info/e9c6ca7aa71e48858ee89145561c8651, hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b/info/TestLogRolling-testLogRolling=82e88219ec4ec4a4fff2a6f409a36e45-b01896581d774266acdaca83c5712e72, hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b/info/5a0955005bbd4d928f921759af2a1b27, hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b/info/235e2ceee5954cfe9eca59ad06e08946, 
hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b/info/b50f590f4f0b43f3bd3b39a077ee3ece, hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b/info/4a414b4eeb234cfbac642608c759b88c, hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b/info/35791c8115cb4dd2bbb4ed11d5e7a762, hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b/info/7bee0fc6f51d4917a1db669bb75f9664, hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b/info/4eeaac854591462499d5eb516b17e1ff, hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b/info/b17c85c32c044aa09b79dc90ce01208b, hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b/info/c463461b64bc4cb591d1fc5b771e849f, hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b/info/e2851ec985fc4680a49a68b8a00bde6f, hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b/info/362f5394705b4a429c64ebce8b0df0ac, hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b/info/9e5a9ba49bc64b6894e4813538bd0038, hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b/info/b871865e434545138b66a4fb6a26b7cc, hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b/info/98052c7bad5047689e02dbf19f126cf1, hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b/info/08fdf19cfdc84f4d916196fdcebc7f3b, hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b/info/c6b0bf0cc67e4c7bacd4e980a3fa4081, hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b/info/a13e09086e8b4f9e972e8a5f90b8bf05, hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b/info/6b6a5f23730a47148ae237a6a88d2bf4, hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b/info/ddaaabb151e14ff5b872633e182dcd25, 
hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b/info/58f00c2dd663471e8557c17a4ac637c6] to archive 2024-12-07T13:27:53,750 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733578051096.977deab04a51d2e8e101e5c7f7816b2b.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-12-07T13:27:53,752 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733578051096.977deab04a51d2e8e101e5c7f7816b2b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b/info/085f4f8c7f6347d48dc94f1ef2666f12.82e88219ec4ec4a4fff2a6f409a36e45 to hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/archive/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b/info/085f4f8c7f6347d48dc94f1ef2666f12.82e88219ec4ec4a4fff2a6f409a36e45 2024-12-07T13:27:53,754 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733578051096.977deab04a51d2e8e101e5c7f7816b2b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b/info/TestLogRolling-testLogRolling=82e88219ec4ec4a4fff2a6f409a36e45-8c0a29dee7dc43a1acc0d0438647509e to hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/archive/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b/info/TestLogRolling-testLogRolling=82e88219ec4ec4a4fff2a6f409a36e45-8c0a29dee7dc43a1acc0d0438647509e 2024-12-07T13:27:53,754 DEBUG [RS_CLOSE_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/hbase/meta/1588230740/recovered.edits/24.seqid, newMaxSeqId=24, maxSeqId=1 2024-12-07T13:27:53,754 DEBUG [RS_CLOSE_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-07T13:27:53,754 INFO [RS_CLOSE_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-07T13:27:53,754 DEBUG [RS_CLOSE_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733578073749Running coprocessor pre-close hooks at 1733578073749Disabling compacts and flushes for region at 1733578073749Disabling writes for close at 1733578073749Writing region close event to WAL at 1733578073750 (+1 ms)Running coprocessor post-close hooks at 1733578073754 (+4 ms)Closed at 1733578073754 2024-12-07T13:27:53,755 DEBUG [RS_CLOSE_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-07T13:27:53,755 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733578051096.977deab04a51d2e8e101e5c7f7816b2b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b/info/e9c6ca7aa71e48858ee89145561c8651 to 
hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/archive/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b/info/e9c6ca7aa71e48858ee89145561c8651 2024-12-07T13:27:53,757 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733578051096.977deab04a51d2e8e101e5c7f7816b2b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b/info/TestLogRolling-testLogRolling=82e88219ec4ec4a4fff2a6f409a36e45-b01896581d774266acdaca83c5712e72 to hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/archive/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b/info/TestLogRolling-testLogRolling=82e88219ec4ec4a4fff2a6f409a36e45-b01896581d774266acdaca83c5712e72 2024-12-07T13:27:53,758 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733578051096.977deab04a51d2e8e101e5c7f7816b2b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b/info/5a0955005bbd4d928f921759af2a1b27 to hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/archive/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b/info/5a0955005bbd4d928f921759af2a1b27 2024-12-07T13:27:53,760 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733578051096.977deab04a51d2e8e101e5c7f7816b2b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b/info/235e2ceee5954cfe9eca59ad06e08946 to hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/archive/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b/info/235e2ceee5954cfe9eca59ad06e08946 2024-12-07T13:27:53,761 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733578051096.977deab04a51d2e8e101e5c7f7816b2b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b/info/b50f590f4f0b43f3bd3b39a077ee3ece to hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/archive/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b/info/b50f590f4f0b43f3bd3b39a077ee3ece 2024-12-07T13:27:53,763 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733578051096.977deab04a51d2e8e101e5c7f7816b2b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b/info/4a414b4eeb234cfbac642608c759b88c to hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/archive/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b/info/4a414b4eeb234cfbac642608c759b88c 2024-12-07T13:27:53,764 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733578051096.977deab04a51d2e8e101e5c7f7816b2b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b/info/35791c8115cb4dd2bbb4ed11d5e7a762 to hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/archive/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b/info/35791c8115cb4dd2bbb4ed11d5e7a762 2024-12-07T13:27:53,766 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733578051096.977deab04a51d2e8e101e5c7f7816b2b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b/info/7bee0fc6f51d4917a1db669bb75f9664 to hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/archive/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b/info/7bee0fc6f51d4917a1db669bb75f9664 2024-12-07T13:27:53,767 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733578051096.977deab04a51d2e8e101e5c7f7816b2b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b/info/4eeaac854591462499d5eb516b17e1ff to hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/archive/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b/info/4eeaac854591462499d5eb516b17e1ff 2024-12-07T13:27:53,769 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733578051096.977deab04a51d2e8e101e5c7f7816b2b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b/info/b17c85c32c044aa09b79dc90ce01208b to hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/archive/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b/info/b17c85c32c044aa09b79dc90ce01208b 2024-12-07T13:27:53,770 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733578051096.977deab04a51d2e8e101e5c7f7816b2b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b/info/c463461b64bc4cb591d1fc5b771e849f to hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/archive/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b/info/c463461b64bc4cb591d1fc5b771e849f 2024-12-07T13:27:53,771 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733578051096.977deab04a51d2e8e101e5c7f7816b2b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b/info/e2851ec985fc4680a49a68b8a00bde6f to hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/archive/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b/info/e2851ec985fc4680a49a68b8a00bde6f 2024-12-07T13:27:53,772 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733578051096.977deab04a51d2e8e101e5c7f7816b2b.-1 {}] 
backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b/info/362f5394705b4a429c64ebce8b0df0ac to hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/archive/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b/info/362f5394705b4a429c64ebce8b0df0ac 2024-12-07T13:27:53,773 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733578051096.977deab04a51d2e8e101e5c7f7816b2b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b/info/9e5a9ba49bc64b6894e4813538bd0038 to hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/archive/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b/info/9e5a9ba49bc64b6894e4813538bd0038 2024-12-07T13:27:53,774 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733578051096.977deab04a51d2e8e101e5c7f7816b2b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b/info/b871865e434545138b66a4fb6a26b7cc to hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/archive/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b/info/b871865e434545138b66a4fb6a26b7cc 2024-12-07T13:27:53,775 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733578051096.977deab04a51d2e8e101e5c7f7816b2b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b/info/98052c7bad5047689e02dbf19f126cf1 to hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/archive/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b/info/98052c7bad5047689e02dbf19f126cf1 2024-12-07T13:27:53,777 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733578051096.977deab04a51d2e8e101e5c7f7816b2b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b/info/08fdf19cfdc84f4d916196fdcebc7f3b to hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/archive/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b/info/08fdf19cfdc84f4d916196fdcebc7f3b 2024-12-07T13:27:53,778 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733578051096.977deab04a51d2e8e101e5c7f7816b2b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b/info/c6b0bf0cc67e4c7bacd4e980a3fa4081 to hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/archive/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b/info/c6b0bf0cc67e4c7bacd4e980a3fa4081 2024-12-07T13:27:53,779 DEBUG 
[StoreCloser-TestLogRolling-testLogRolling,row0062,1733578051096.977deab04a51d2e8e101e5c7f7816b2b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b/info/a13e09086e8b4f9e972e8a5f90b8bf05 to hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/archive/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b/info/a13e09086e8b4f9e972e8a5f90b8bf05 2024-12-07T13:27:53,780 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733578051096.977deab04a51d2e8e101e5c7f7816b2b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b/info/6b6a5f23730a47148ae237a6a88d2bf4 to hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/archive/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b/info/6b6a5f23730a47148ae237a6a88d2bf4 2024-12-07T13:27:53,781 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733578051096.977deab04a51d2e8e101e5c7f7816b2b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b/info/ddaaabb151e14ff5b872633e182dcd25 to hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/archive/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b/info/ddaaabb151e14ff5b872633e182dcd25 2024-12-07T13:27:53,782 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733578051096.977deab04a51d2e8e101e5c7f7816b2b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b/info/58f00c2dd663471e8557c17a4ac637c6 to hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/archive/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b/info/58f00c2dd663471e8557c17a4ac637c6 2024-12-07T13:27:53,783 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733578051096.977deab04a51d2e8e101e5c7f7816b2b.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=c7c455b68129:38977 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] 
at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 
16 more 2024-12-07T13:27:53,783 WARN [StoreCloser-TestLogRolling-testLogRolling,row0062,1733578051096.977deab04a51d2e8e101e5c7f7816b2b.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [e9c6ca7aa71e48858ee89145561c8651=42984, 5a0955005bbd4d928f921759af2a1b27=12516, 235e2ceee5954cfe9eca59ad06e08946=63636, b50f590f4f0b43f3bd3b39a077ee3ece=17906, 4a414b4eeb234cfbac642608c759b88c=17906, 35791c8115cb4dd2bbb4ed11d5e7a762=84293, 7bee0fc6f51d4917a1db669bb75f9664=12516, 4eeaac854591462499d5eb516b17e1ff=16828, b17c85c32c044aa09b79dc90ce01208b=111353, c463461b64bc4cb591d1fc5b771e849f=20078, e2851ec985fc4680a49a68b8a00bde6f=12516, 362f5394705b4a429c64ebce8b0df0ac=132069, 9e5a9ba49bc64b6894e4813538bd0038=17906, b871865e434545138b66a4fb6a26b7cc=16828, 98052c7bad5047689e02dbf19f126cf1=151655, 08fdf19cfdc84f4d916196fdcebc7f3b=12523, c6b0bf0cc67e4c7bacd4e980a3fa4081=20092, a13e09086e8b4f9e972e8a5f90b8bf05=178740, 6b6a5f23730a47148ae237a6a88d2bf4=16839, ddaaabb151e14ff5b872633e182dcd25=12523, 58f00c2dd663471e8557c17a4ac637c6=17918] 2024-12-07T13:27:53,787 DEBUG [RS_CLOSE_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/977deab04a51d2e8e101e5c7f7816b2b/recovered.edits/349.seqid, newMaxSeqId=349, maxSeqId=130 2024-12-07T13:27:53,787 INFO [RS_CLOSE_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,row0062,1733578051096.977deab04a51d2e8e101e5c7f7816b2b. 2024-12-07T13:27:53,787 DEBUG [RS_CLOSE_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 977deab04a51d2e8e101e5c7f7816b2b: Waiting for close lock at 1733578073748Running coprocessor pre-close hooks at 1733578073748Disabling compacts and flushes for region at 1733578073748Disabling writes for close at 1733578073748Writing region close event to WAL at 1733578073784 (+36 ms)Running coprocessor post-close hooks at 1733578073787 (+3 ms)Closed at 1733578073787 2024-12-07T13:27:53,788 DEBUG [RS_CLOSE_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRolling,row0062,1733578051096.977deab04a51d2e8e101e5c7f7816b2b. 2024-12-07T13:27:53,788 DEBUG [RS_CLOSE_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 85a851f2688c52b461cdf08f415e962c, disabling compactions & flushes 2024-12-07T13:27:53,788 INFO [RS_CLOSE_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1733578051096.85a851f2688c52b461cdf08f415e962c. 2024-12-07T13:27:53,788 DEBUG [RS_CLOSE_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1733578051096.85a851f2688c52b461cdf08f415e962c. 2024-12-07T13:27:53,788 DEBUG [RS_CLOSE_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1733578051096.85a851f2688c52b461cdf08f415e962c. 
after waiting 0 ms 2024-12-07T13:27:53,788 DEBUG [RS_CLOSE_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1733578051096.85a851f2688c52b461cdf08f415e962c. 2024-12-07T13:27:53,788 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733578051096.85a851f2688c52b461cdf08f415e962c.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/85a851f2688c52b461cdf08f415e962c/info/085f4f8c7f6347d48dc94f1ef2666f12.82e88219ec4ec4a4fff2a6f409a36e45->hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/82e88219ec4ec4a4fff2a6f409a36e45/info/085f4f8c7f6347d48dc94f1ef2666f12-bottom] to archive 2024-12-07T13:27:53,789 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733578051096.85a851f2688c52b461cdf08f415e962c.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-12-07T13:27:53,790 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733578051096.85a851f2688c52b461cdf08f415e962c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/85a851f2688c52b461cdf08f415e962c/info/085f4f8c7f6347d48dc94f1ef2666f12.82e88219ec4ec4a4fff2a6f409a36e45 to hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/archive/data/default/TestLogRolling-testLogRolling/85a851f2688c52b461cdf08f415e962c/info/085f4f8c7f6347d48dc94f1ef2666f12.82e88219ec4ec4a4fff2a6f409a36e45 2024-12-07T13:27:53,790 WARN [StoreCloser-TestLogRolling-testLogRolling,,1733578051096.85a851f2688c52b461cdf08f415e962c.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [] 2024-12-07T13:27:53,794 DEBUG [RS_CLOSE_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/data/default/TestLogRolling-testLogRolling/85a851f2688c52b461cdf08f415e962c/recovered.edits/135.seqid, newMaxSeqId=135, maxSeqId=130 2024-12-07T13:27:53,794 INFO [RS_CLOSE_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1733578051096.85a851f2688c52b461cdf08f415e962c. 2024-12-07T13:27:53,794 DEBUG [RS_CLOSE_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 85a851f2688c52b461cdf08f415e962c: Waiting for close lock at 1733578073788Running coprocessor pre-close hooks at 1733578073788Disabling compacts and flushes for region at 1733578073788Disabling writes for close at 1733578073788Writing region close event to WAL at 1733578073791 (+3 ms)Running coprocessor post-close hooks at 1733578073794 (+3 ms)Closed at 1733578073794 2024-12-07T13:27:53,794 DEBUG [RS_CLOSE_REGION-regionserver/c7c455b68129:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRolling,,1733578051096.85a851f2688c52b461cdf08f415e962c. 2024-12-07T13:27:53,949 INFO [RS:0;c7c455b68129:34335 {}] regionserver.HRegionServer(976): stopping server c7c455b68129,34335,1733578035593; all regions closed. 
2024-12-07T13:27:53,950 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:27:53,950 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:27:53,951 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:27:53,951 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:27:53,951 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:27:53,955 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34771 is added to blk_1073741834_1010 (size=8107) 2024-12-07T13:27:53,956 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32921 is added to blk_1073741834_1010 (size=8107) 2024-12-07T13:27:53,962 DEBUG [RS:0;c7c455b68129:34335 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/oldWALs 2024-12-07T13:27:53,962 INFO [RS:0;c7c455b68129:34335 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog c7c455b68129%2C34335%2C1733578035593.meta:.meta(num 1733578036616) 2024-12-07T13:27:53,963 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:27:53,963 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:27:53,963 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:27:53,963 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:27:53,963 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:27:53,966 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32921 is added to blk_1073741883_1059 (size=780) 2024-12-07T13:27:53,966 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34771 is added to blk_1073741883_1059 (size=780) 2024-12-07T13:27:53,968 DEBUG [RS:0;c7c455b68129:34335 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/oldWALs 2024-12-07T13:27:53,968 INFO [RS:0;c7c455b68129:34335 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog c7c455b68129%2C34335%2C1733578035593:(num 1733578073618) 2024-12-07T13:27:53,968 DEBUG [RS:0;c7c455b68129:34335 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T13:27:53,968 INFO [RS:0;c7c455b68129:34335 {}] regionserver.LeaseManager(133): Closed leases 2024-12-07T13:27:53,968 INFO [RS:0;c7c455b68129:34335 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-07T13:27:53,969 INFO [RS:0;c7c455b68129:34335 {}] hbase.ChoreService(370): Chore service for: regionserver/c7c455b68129:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-07T13:27:53,969 INFO [RS:0;c7c455b68129:34335 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-07T13:27:53,969 INFO [regionserver/c7c455b68129:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-07T13:27:53,969 INFO [RS:0;c7c455b68129:34335 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:34335 2024-12-07T13:27:53,977 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34335-0x100007597840001, quorum=127.0.0.1:64288, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/c7c455b68129,34335,1733578035593 2024-12-07T13:27:53,977 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38977-0x100007597840000, quorum=127.0.0.1:64288, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-07T13:27:53,977 INFO [RS:0;c7c455b68129:34335 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-07T13:27:53,977 INFO [regionserver/c7c455b68129:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-07T13:27:53,988 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [c7c455b68129,34335,1733578035593] 2024-12-07T13:27:53,998 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/c7c455b68129,34335,1733578035593 already deleted, retry=false 2024-12-07T13:27:53,998 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; c7c455b68129,34335,1733578035593 expired; onlineServers=0 2024-12-07T13:27:53,998 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'c7c455b68129,38977,1733578035403' ***** 2024-12-07T13:27:53,998 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-07T13:27:53,998 INFO [M:0;c7c455b68129:38977 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-07T13:27:53,998 INFO [M:0;c7c455b68129:38977 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-07T13:27:53,998 DEBUG [M:0;c7c455b68129:38977 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-07T13:27:53,999 DEBUG [M:0;c7c455b68129:38977 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-07T13:27:53,999 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-12-07T13:27:53,999 DEBUG [master/c7c455b68129:0:becomeActiveMaster-HFileCleaner.small.0-1733578035916 {}] cleaner.HFileCleaner(306): Exit Thread[master/c7c455b68129:0:becomeActiveMaster-HFileCleaner.small.0-1733578035916,5,FailOnTimeoutGroup] 2024-12-07T13:27:53,999 DEBUG [master/c7c455b68129:0:becomeActiveMaster-HFileCleaner.large.0-1733578035915 {}] cleaner.HFileCleaner(306): Exit Thread[master/c7c455b68129:0:becomeActiveMaster-HFileCleaner.large.0-1733578035915,5,FailOnTimeoutGroup] 2024-12-07T13:27:53,999 INFO [M:0;c7c455b68129:38977 {}] hbase.ChoreService(370): Chore service for: master/c7c455b68129:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-12-07T13:27:53,999 INFO [M:0;c7c455b68129:38977 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-07T13:27:53,999 DEBUG [M:0;c7c455b68129:38977 {}] master.HMaster(1795): Stopping service threads 2024-12-07T13:27:53,999 INFO [M:0;c7c455b68129:38977 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-07T13:27:53,999 INFO [M:0;c7c455b68129:38977 {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-07T13:27:53,999 INFO [M:0;c7c455b68129:38977 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-07T13:27:53,999 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-07T13:27:54,009 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38977-0x100007597840000, quorum=127.0.0.1:64288, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-07T13:27:54,009 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38977-0x100007597840000, quorum=127.0.0.1:64288, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T13:27:54,009 DEBUG [M:0;c7c455b68129:38977 {}] zookeeper.ZKUtil(347): master:38977-0x100007597840000, quorum=127.0.0.1:64288, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-07T13:27:54,009 WARN [M:0;c7c455b68129:38977 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-07T13:27:54,009 INFO [M:0;c7c455b68129:38977 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/.lastflushedseqids 2024-12-07T13:27:54,014 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34771 is added to blk_1073741884_1060 (size=228) 2024-12-07T13:27:54,014 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32921 is added to blk_1073741884_1060 (size=228) 2024-12-07T13:27:54,014 INFO [M:0;c7c455b68129:38977 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-12-07T13:27:54,014 INFO [M:0;c7c455b68129:38977 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-07T13:27:54,014 DEBUG [M:0;c7c455b68129:38977 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-07T13:27:54,014 INFO [M:0;c7c455b68129:38977 {}] 
regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-07T13:27:54,014 DEBUG [M:0;c7c455b68129:38977 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-07T13:27:54,014 DEBUG [M:0;c7c455b68129:38977 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-07T13:27:54,014 DEBUG [M:0;c7c455b68129:38977 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-07T13:27:54,015 INFO [M:0;c7c455b68129:38977 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=51.43 KB heapSize=63.38 KB 2024-12-07T13:27:54,028 DEBUG [M:0;c7c455b68129:38977 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/1f67f49074de411c8810414cf0b10e02 is 82, key is hbase:meta,,1/info:regioninfo/1733578036645/Put/seqid=0 2024-12-07T13:27:54,031 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34771 is added to blk_1073741885_1061 (size=5672) 2024-12-07T13:27:54,032 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32921 is added to blk_1073741885_1061 (size=5672) 2024-12-07T13:27:54,032 INFO [M:0;c7c455b68129:38977 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/1f67f49074de411c8810414cf0b10e02 2024-12-07T13:27:54,048 DEBUG [M:0;c7c455b68129:38977 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/fd79254cf99e4743b42c276134af982d is 750, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1733578037122/Put/seqid=0 2024-12-07T13:27:54,051 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34771 is added to blk_1073741886_1062 (size=7090) 2024-12-07T13:27:54,051 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32921 is added to blk_1073741886_1062 (size=7090) 2024-12-07T13:27:54,052 INFO [M:0;c7c455b68129:38977 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=50.83 KB at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/fd79254cf99e4743b42c276134af982d 2024-12-07T13:27:54,055 INFO [M:0;c7c455b68129:38977 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for fd79254cf99e4743b42c276134af982d 2024-12-07T13:27:54,070 DEBUG [M:0;c7c455b68129:38977 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/551140d9e5fc4fe48c87c43f4e571393 is 69, key is c7c455b68129,34335,1733578035593/rs:state/1733578035955/Put/seqid=0 
2024-12-07T13:27:54,074 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32921 is added to blk_1073741887_1063 (size=5156) 2024-12-07T13:27:54,074 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34771 is added to blk_1073741887_1063 (size=5156) 2024-12-07T13:27:54,074 INFO [M:0;c7c455b68129:38977 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/551140d9e5fc4fe48c87c43f4e571393 2024-12-07T13:27:54,088 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34335-0x100007597840001, quorum=127.0.0.1:64288, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-07T13:27:54,088 INFO [RS:0;c7c455b68129:34335 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-07T13:27:54,088 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34335-0x100007597840001, quorum=127.0.0.1:64288, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-07T13:27:54,088 INFO [RS:0;c7c455b68129:34335 {}] regionserver.HRegionServer(1031): Exiting; stopping=c7c455b68129,34335,1733578035593; zookeeper connection closed. 2024-12-07T13:27:54,088 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@181380a9 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@181380a9 2024-12-07T13:27:54,088 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-12-07T13:27:54,090 DEBUG [M:0;c7c455b68129:38977 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/de7e279a67ef48feb976e09a0abff985 is 52, key is load_balancer_on/state:d/1733578036729/Put/seqid=0 2024-12-07T13:27:54,094 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32921 is added to blk_1073741888_1064 (size=5056) 2024-12-07T13:27:54,094 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34771 is added to blk_1073741888_1064 (size=5056) 2024-12-07T13:27:54,094 INFO [M:0;c7c455b68129:38977 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/de7e279a67ef48feb976e09a0abff985 2024-12-07T13:27:54,099 DEBUG [M:0;c7c455b68129:38977 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/1f67f49074de411c8810414cf0b10e02 as hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/1f67f49074de411c8810414cf0b10e02 2024-12-07T13:27:54,103 INFO [M:0;c7c455b68129:38977 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/1f67f49074de411c8810414cf0b10e02, entries=8, sequenceid=125, filesize=5.5 K 2024-12-07T13:27:54,103 DEBUG [M:0;c7c455b68129:38977 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/fd79254cf99e4743b42c276134af982d as hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/fd79254cf99e4743b42c276134af982d 2024-12-07T13:27:54,107 INFO [M:0;c7c455b68129:38977 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for fd79254cf99e4743b42c276134af982d 2024-12-07T13:27:54,107 INFO [M:0;c7c455b68129:38977 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/fd79254cf99e4743b42c276134af982d, entries=13, sequenceid=125, filesize=6.9 K 2024-12-07T13:27:54,108 DEBUG [M:0;c7c455b68129:38977 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/551140d9e5fc4fe48c87c43f4e571393 as hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/551140d9e5fc4fe48c87c43f4e571393 2024-12-07T13:27:54,113 INFO [M:0;c7c455b68129:38977 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/551140d9e5fc4fe48c87c43f4e571393, entries=1, sequenceid=125, filesize=5.0 K 2024-12-07T13:27:54,114 DEBUG [M:0;c7c455b68129:38977 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/de7e279a67ef48feb976e09a0abff985 as hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/de7e279a67ef48feb976e09a0abff985 2024-12-07T13:27:54,120 INFO [M:0;c7c455b68129:38977 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37523/user/jenkins/test-data/53ec9f30-9c83-5d01-c329-55bb2868355f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/de7e279a67ef48feb976e09a0abff985, entries=1, sequenceid=125, filesize=4.9 K 2024-12-07T13:27:54,122 INFO [M:0;c7c455b68129:38977 {}] regionserver.HRegion(3140): Finished flush of dataSize ~51.43 KB/52663, heapSize ~63.32 KB/64840, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 107ms, sequenceid=125, compaction requested=false 2024-12-07T13:27:54,123 INFO [M:0;c7c455b68129:38977 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-07T13:27:54,123 DEBUG [M:0;c7c455b68129:38977 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733578074014Disabling compacts and flushes for region at 1733578074014Disabling writes for close at 1733578074014Obtaining lock to block concurrent updates at 1733578074015 (+1 ms)Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733578074015Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=52663, getHeapSize=64840, getOffHeapSize=0, getCellsCount=148 at 1733578074015Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1733578074015Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733578074016 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733578074027 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733578074027Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733578074035 (+8 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733578074047 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733578074047Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733578074055 (+8 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733578074069 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733578074069Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1733578074078 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1733578074090 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1733578074090Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@14180c18: reopening flushed file at 1733578074098 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7b4d29c3: reopening flushed file at 1733578074103 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@30c30f1e: reopening flushed file at 1733578074107 (+4 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@66d9e5ad: reopening flushed file at 1733578074113 (+6 ms)Finished flush of dataSize ~51.43 KB/52663, heapSize ~63.32 KB/64840, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 107ms, sequenceid=125, compaction requested=false at 1733578074122 (+9 ms)Writing region close event to WAL at 1733578074123 (+1 ms)Closed at 1733578074123 2024-12-07T13:27:54,123 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:27:54,123 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:27:54,123 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:27:54,123 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:27:54,124 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:27:54,125 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32921 is added to blk_1073741830_1006 (size=61332) 2024-12-07T13:27:54,125 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34771 is added to blk_1073741830_1006 (size=61332) 2024-12-07T13:27:54,126 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-07T13:27:54,126 INFO [M:0;c7c455b68129:38977 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-12-07T13:27:54,126 INFO [M:0;c7c455b68129:38977 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:38977 2024-12-07T13:27:54,126 INFO [M:0;c7c455b68129:38977 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-07T13:27:54,235 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38977-0x100007597840000, quorum=127.0.0.1:64288, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-07T13:27:54,235 INFO [M:0;c7c455b68129:38977 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-07T13:27:54,235 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38977-0x100007597840000, quorum=127.0.0.1:64288, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-07T13:27:54,240 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3dc4994c{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-07T13:27:54,241 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@25f949b{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-07T13:27:54,241 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-07T13:27:54,241 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4a646ba1{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-07T13:27:54,241 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4445ac53{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3c88c6db-a566-8e14-e599-3a5f299ea0ab/hadoop.log.dir/,STOPPED} 2024-12-07T13:27:54,245 WARN [BP-622239927-172.17.0.3-1733578033187 heartbeating to localhost/127.0.0.1:37523 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-07T13:27:54,245 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-07T13:27:54,245 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-07T13:27:54,245 WARN [BP-622239927-172.17.0.3-1733578033187 heartbeating to localhost/127.0.0.1:37523 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-622239927-172.17.0.3-1733578033187 (Datanode Uuid 11bf8af6-d51f-452a-b8a3-d1ac17aa06ee) service to localhost/127.0.0.1:37523 2024-12-07T13:27:54,246 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3c88c6db-a566-8e14-e599-3a5f299ea0ab/cluster_c9e402af-1f98-bf37-266b-cc9de78b5940/data/data3/current/BP-622239927-172.17.0.3-1733578033187 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-07T13:27:54,247 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3c88c6db-a566-8e14-e599-3a5f299ea0ab/cluster_c9e402af-1f98-bf37-266b-cc9de78b5940/data/data4/current/BP-622239927-172.17.0.3-1733578033187 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-07T13:27:54,247 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-07T13:27:54,249 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3523e770{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-07T13:27:54,249 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6f53b1c{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-07T13:27:54,249 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-07T13:27:54,249 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3057e5b5{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-07T13:27:54,249 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@719b1e37{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3c88c6db-a566-8e14-e599-3a5f299ea0ab/hadoop.log.dir/,STOPPED} 2024-12-07T13:27:54,251 WARN [BP-622239927-172.17.0.3-1733578033187 heartbeating to localhost/127.0.0.1:37523 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-07T13:27:54,251 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-07T13:27:54,251 WARN [BP-622239927-172.17.0.3-1733578033187 heartbeating to localhost/127.0.0.1:37523 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-622239927-172.17.0.3-1733578033187 (Datanode Uuid 99df9b7b-45d0-4690-a3c4-aeca82fe250f) service to localhost/127.0.0.1:37523 2024-12-07T13:27:54,251 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-07T13:27:54,251 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3c88c6db-a566-8e14-e599-3a5f299ea0ab/cluster_c9e402af-1f98-bf37-266b-cc9de78b5940/data/data1/current/BP-622239927-172.17.0.3-1733578033187 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-07T13:27:54,251 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3c88c6db-a566-8e14-e599-3a5f299ea0ab/cluster_c9e402af-1f98-bf37-266b-cc9de78b5940/data/data2/current/BP-622239927-172.17.0.3-1733578033187 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-07T13:27:54,251 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-07T13:27:54,256 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5db25599{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-07T13:27:54,256 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@4f8818bb{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-07T13:27:54,256 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-07T13:27:54,256 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@516e643a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-07T13:27:54,256 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@75d9b484{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3c88c6db-a566-8e14-e599-3a5f299ea0ab/hadoop.log.dir/,STOPPED} 2024-12-07T13:27:54,263 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-12-07T13:27:54,291 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-12-07T13:27:54,298 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRolling Thread=229 (was 205) Potentially hanging thread: HMaster-EventLoopGroup-14-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-39-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:37523 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-41-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-41-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-40-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-39-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-38-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-14-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-41-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-14-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.6@localhost:37523 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-40-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-39-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:37523 from jenkins.hfs.6 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:37523 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 
Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-40-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:37523 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-38-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:37523 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:37523 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-38-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:37523 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=512 (was 483) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=144 (was 134) - SystemLoadAverage LEAK? 
-, ProcessCount=11 (was 11), AvailableMemoryMB=15668 (was 15771) 2024-12-07T13:27:54,304 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnNothingWritten Thread=229, OpenFileDescriptor=512, MaxFileDescriptor=1048576, SystemLoadAverage=144, ProcessCount=11, AvailableMemoryMB=15668 2024-12-07T13:27:54,305 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-07T13:27:54,305 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3c88c6db-a566-8e14-e599-3a5f299ea0ab/hadoop.log.dir so I do NOT create it in target/test-data/0fa6162d-4bf2-57be-4763-22e9af1a8e11 2024-12-07T13:27:54,305 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3c88c6db-a566-8e14-e599-3a5f299ea0ab/hadoop.tmp.dir so I do NOT create it in target/test-data/0fa6162d-4bf2-57be-4763-22e9af1a8e11 2024-12-07T13:27:54,305 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0fa6162d-4bf2-57be-4763-22e9af1a8e11/cluster_82231e54-2080-fe71-d282-39ea83365658, deleteOnExit=true 2024-12-07T13:27:54,305 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-12-07T13:27:54,305 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0fa6162d-4bf2-57be-4763-22e9af1a8e11/test.cache.data in system properties and HBase conf 2024-12-07T13:27:54,305 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0fa6162d-4bf2-57be-4763-22e9af1a8e11/hadoop.tmp.dir in system properties and HBase conf 2024-12-07T13:27:54,305 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0fa6162d-4bf2-57be-4763-22e9af1a8e11/hadoop.log.dir in system properties and HBase conf 2024-12-07T13:27:54,305 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0fa6162d-4bf2-57be-4763-22e9af1a8e11/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-07T13:27:54,305 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0fa6162d-4bf2-57be-4763-22e9af1a8e11/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-07T13:27:54,305 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-12-07T13:27:54,305 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-12-07T13:27:54,306 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0fa6162d-4bf2-57be-4763-22e9af1a8e11/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-07T13:27:54,306 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0fa6162d-4bf2-57be-4763-22e9af1a8e11/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-07T13:27:54,306 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0fa6162d-4bf2-57be-4763-22e9af1a8e11/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-07T13:27:54,306 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0fa6162d-4bf2-57be-4763-22e9af1a8e11/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-07T13:27:54,306 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0fa6162d-4bf2-57be-4763-22e9af1a8e11/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-07T13:27:54,306 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0fa6162d-4bf2-57be-4763-22e9af1a8e11/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-07T13:27:54,306 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0fa6162d-4bf2-57be-4763-22e9af1a8e11/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-07T13:27:54,306 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0fa6162d-4bf2-57be-4763-22e9af1a8e11/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-07T13:27:54,306 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0fa6162d-4bf2-57be-4763-22e9af1a8e11/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-07T13:27:54,306 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0fa6162d-4bf2-57be-4763-22e9af1a8e11/nfs.dump.dir in system properties and HBase conf 2024-12-07T13:27:54,306 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0fa6162d-4bf2-57be-4763-22e9af1a8e11/java.io.tmpdir in system properties and HBase conf 2024-12-07T13:27:54,306 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0fa6162d-4bf2-57be-4763-22e9af1a8e11/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-07T13:27:54,306 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0fa6162d-4bf2-57be-4763-22e9af1a8e11/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-07T13:27:54,306 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0fa6162d-4bf2-57be-4763-22e9af1a8e11/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-07T13:27:54,318 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-07T13:27:54,642 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,42863,1733577894737/c7c455b68129%2C42863%2C1733577894737.1733577894972 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:27:54,642 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,43699,1733577893481/c7c455b68129%2C43699%2C1733577893481.meta.1733577894498.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:27:54,682 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-07T13:27:54,685 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-07T13:27:54,687 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-07T13:27:54,687 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-07T13:27:54,687 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-07T13:27:54,688 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-07T13:27:54,689 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@18478920{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0fa6162d-4bf2-57be-4763-22e9af1a8e11/hadoop.log.dir/,AVAILABLE} 2024-12-07T13:27:54,689 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@40f3733a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-07T13:27:54,778 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@265f6a26{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0fa6162d-4bf2-57be-4763-22e9af1a8e11/java.io.tmpdir/jetty-localhost-37227-hadoop-hdfs-3_4_1-tests_jar-_-any-773944556844706443/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-07T13:27:54,778 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@64e1b9c7{HTTP/1.1, (http/1.1)}{localhost:37227} 2024-12-07T13:27:54,778 INFO [Time-limited test {}] server.Server(415): Started @292619ms 2024-12-07T13:27:54,788 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-07T13:27:55,042 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-07T13:27:55,044 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-07T13:27:55,045 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-07T13:27:55,045 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-07T13:27:55,045 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-07T13:27:55,045 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1cfa6b2d{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0fa6162d-4bf2-57be-4763-22e9af1a8e11/hadoop.log.dir/,AVAILABLE} 2024-12-07T13:27:55,046 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@56063e0c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-07T13:27:55,135 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2ec1c28e{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0fa6162d-4bf2-57be-4763-22e9af1a8e11/java.io.tmpdir/jetty-localhost-42655-hadoop-hdfs-3_4_1-tests_jar-_-any-2494217398245392412/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-07T13:27:55,135 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2601a9a1{HTTP/1.1, (http/1.1)}{localhost:42655} 2024-12-07T13:27:55,136 INFO [Time-limited test {}] server.Server(415): Started @292976ms 2024-12-07T13:27:55,136 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-07T13:27:55,159 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-07T13:27:55,161 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-07T13:27:55,162 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-07T13:27:55,162 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-07T13:27:55,162 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-07T13:27:55,162 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2551ca75{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0fa6162d-4bf2-57be-4763-22e9af1a8e11/hadoop.log.dir/,AVAILABLE} 2024-12-07T13:27:55,162 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2753102b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-07T13:27:55,252 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@403020f8{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0fa6162d-4bf2-57be-4763-22e9af1a8e11/java.io.tmpdir/jetty-localhost-35475-hadoop-hdfs-3_4_1-tests_jar-_-any-6250285038055925763/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-07T13:27:55,252 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6f4abee{HTTP/1.1, (http/1.1)}{localhost:35475} 2024-12-07T13:27:55,252 INFO [Time-limited test {}] server.Server(415): Started @293093ms 2024-12-07T13:27:55,253 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-07T13:27:55,642 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,42863,1733577894737/c7c455b68129%2C42863%2C1733577894737.1733577894972 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:27:55,642 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,43699,1733577893481/c7c455b68129%2C43699%2C1733577893481.meta.1733577894498.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-07T13:27:56,276 WARN [Thread-2506 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0fa6162d-4bf2-57be-4763-22e9af1a8e11/cluster_82231e54-2080-fe71-d282-39ea83365658/data/data1/current/BP-816922664-172.17.0.3-1733578074321/current, will proceed with Du for space computation calculation, 2024-12-07T13:27:56,276 WARN [Thread-2507 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0fa6162d-4bf2-57be-4763-22e9af1a8e11/cluster_82231e54-2080-fe71-d282-39ea83365658/data/data2/current/BP-816922664-172.17.0.3-1733578074321/current, will proceed with Du for space computation calculation, 2024-12-07T13:27:56,294 WARN [Thread-2470 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-07T13:27:56,296 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x25e3011cb1a0fde3 with lease ID 0x757aed327ff262c9: Processing first storage report for DS-b7a59383-afde-4a35-9636-5e5d3056a803 from datanode DatanodeRegistration(127.0.0.1:45873, datanodeUuid=14f8f4b2-dd45-4ed4-ac1f-b5cee7cc69fe, infoPort=34733, infoSecurePort=0, ipcPort=41543, storageInfo=lv=-57;cid=testClusterID;nsid=275385388;c=1733578074321) 2024-12-07T13:27:56,296 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x25e3011cb1a0fde3 with lease ID 0x757aed327ff262c9: from storage DS-b7a59383-afde-4a35-9636-5e5d3056a803 node DatanodeRegistration(127.0.0.1:45873, datanodeUuid=14f8f4b2-dd45-4ed4-ac1f-b5cee7cc69fe, infoPort=34733, infoSecurePort=0, ipcPort=41543, storageInfo=lv=-57;cid=testClusterID;nsid=275385388;c=1733578074321), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-07T13:27:56,296 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x25e3011cb1a0fde3 with lease ID 0x757aed327ff262c9: Processing first storage report for DS-5b76fffe-63d8-49f6-9d63-fbfa6dfe606f from datanode DatanodeRegistration(127.0.0.1:45873, datanodeUuid=14f8f4b2-dd45-4ed4-ac1f-b5cee7cc69fe, infoPort=34733, infoSecurePort=0, ipcPort=41543, storageInfo=lv=-57;cid=testClusterID;nsid=275385388;c=1733578074321) 2024-12-07T13:27:56,296 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x25e3011cb1a0fde3 with lease ID 0x757aed327ff262c9: from storage DS-5b76fffe-63d8-49f6-9d63-fbfa6dfe606f node DatanodeRegistration(127.0.0.1:45873, datanodeUuid=14f8f4b2-dd45-4ed4-ac1f-b5cee7cc69fe, infoPort=34733, infoSecurePort=0, ipcPort=41543, storageInfo=lv=-57;cid=testClusterID;nsid=275385388;c=1733578074321), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-07T13:27:56,412 WARN [Thread-2518 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0fa6162d-4bf2-57be-4763-22e9af1a8e11/cluster_82231e54-2080-fe71-d282-39ea83365658/data/data4/current/BP-816922664-172.17.0.3-1733578074321/current, will proceed with Du for space computation calculation, 2024-12-07T13:27:56,412 WARN [Thread-2517 {}] impl.BlockPoolSlice(347): dfsUsed file missing in 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0fa6162d-4bf2-57be-4763-22e9af1a8e11/cluster_82231e54-2080-fe71-d282-39ea83365658/data/data3/current/BP-816922664-172.17.0.3-1733578074321/current, will proceed with Du for space computation calculation, 2024-12-07T13:27:56,432 WARN [Thread-2493 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-07T13:27:56,434 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x9e6ab8e98b1656a8 with lease ID 0x757aed327ff262ca: Processing first storage report for DS-21dc4ab7-1d6d-40f7-9c95-bc74ff1618a4 from datanode DatanodeRegistration(127.0.0.1:39125, datanodeUuid=26fc21ae-d79a-4e66-9a67-5813c2e293f1, infoPort=32791, infoSecurePort=0, ipcPort=38667, storageInfo=lv=-57;cid=testClusterID;nsid=275385388;c=1733578074321) 2024-12-07T13:27:56,434 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x9e6ab8e98b1656a8 with lease ID 0x757aed327ff262ca: from storage DS-21dc4ab7-1d6d-40f7-9c95-bc74ff1618a4 node DatanodeRegistration(127.0.0.1:39125, datanodeUuid=26fc21ae-d79a-4e66-9a67-5813c2e293f1, infoPort=32791, infoSecurePort=0, ipcPort=38667, storageInfo=lv=-57;cid=testClusterID;nsid=275385388;c=1733578074321), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-07T13:27:56,434 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x9e6ab8e98b1656a8 with lease ID 0x757aed327ff262ca: Processing first storage report for DS-ee419d43-4458-4817-ae6f-bd9253f01531 from datanode DatanodeRegistration(127.0.0.1:39125, datanodeUuid=26fc21ae-d79a-4e66-9a67-5813c2e293f1, infoPort=32791, infoSecurePort=0, ipcPort=38667, storageInfo=lv=-57;cid=testClusterID;nsid=275385388;c=1733578074321) 2024-12-07T13:27:56,434 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x9e6ab8e98b1656a8 with lease ID 0x757aed327ff262ca: from storage DS-ee419d43-4458-4817-ae6f-bd9253f01531 node DatanodeRegistration(127.0.0.1:39125, datanodeUuid=26fc21ae-d79a-4e66-9a67-5813c2e293f1, infoPort=32791, infoSecurePort=0, ipcPort=38667, storageInfo=lv=-57;cid=testClusterID;nsid=275385388;c=1733578074321), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-07T13:27:56,481 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0fa6162d-4bf2-57be-4763-22e9af1a8e11 2024-12-07T13:27:56,485 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0fa6162d-4bf2-57be-4763-22e9af1a8e11/cluster_82231e54-2080-fe71-d282-39ea83365658/zookeeper_0, clientPort=60064, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0fa6162d-4bf2-57be-4763-22e9af1a8e11/cluster_82231e54-2080-fe71-d282-39ea83365658/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0fa6162d-4bf2-57be-4763-22e9af1a8e11/cluster_82231e54-2080-fe71-d282-39ea83365658/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, 
maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-07T13:27:56,486 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=60064 2024-12-07T13:27:56,486 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T13:27:56,488 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T13:27:56,497 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45873 is added to blk_1073741825_1001 (size=7) 2024-12-07T13:27:56,497 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39125 is added to blk_1073741825_1001 (size=7) 2024-12-07T13:27:56,499 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:45737/user/jenkins/test-data/6c56ff26-3bd7-d39e-4fc2-e485aa42ab7c with version=8 2024-12-07T13:27:56,499 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:33723/user/jenkins/test-data/3eb2eb4e-7df3-dd45-b018-332cf885bdba/hbase-staging 2024-12-07T13:27:56,501 INFO [Time-limited test {}] client.ConnectionUtils(128): master/c7c455b68129:0 server-side Connection retries=45 2024-12-07T13:27:56,501 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-07T13:27:56,501 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-07T13:27:56,501 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-07T13:27:56,501 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-07T13:27:56,501 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-07T13:27:56,501 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-12-07T13:27:56,501 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-07T13:27:56,502 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:41799 2024-12-07T13:27:56,503 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:41799 connecting to ZooKeeper ensemble=127.0.0.1:60064 2024-12-07T13:27:56,570 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:417990x0, quorum=127.0.0.1:60064, baseZNode=/hbase Received ZooKeeper Event, type=None, 
state=SyncConnected, path=null 2024-12-07T13:27:56,572 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:41799-0x1000076380a0000 connected 2024-12-07T13:27:56,643 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,43699,1733577893481/c7c455b68129%2C43699%2C1733577893481.meta.1733577894498.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:27:56,643 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,42863,1733577894737/c7c455b68129%2C42863%2C1733577894737.1733577894972 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:27:56,893 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T13:27:56,897 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T13:27:56,900 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:41799-0x1000076380a0000, quorum=127.0.0.1:60064, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-07T13:27:56,900 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:45737/user/jenkins/test-data/6c56ff26-3bd7-d39e-4fc2-e485aa42ab7c, hbase.cluster.distributed=false 2024-12-07T13:27:56,902 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:41799-0x1000076380a0000, quorum=127.0.0.1:60064, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-07T13:27:56,902 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=41799 2024-12-07T13:27:56,903 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=41799 2024-12-07T13:27:56,903 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=41799 2024-12-07T13:27:56,903 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=41799 2024-12-07T13:27:56,904 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with 
threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=41799 2024-12-07T13:27:56,919 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/c7c455b68129:0 server-side Connection retries=45 2024-12-07T13:27:56,919 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-07T13:27:56,919 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-07T13:27:56,919 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-07T13:27:56,919 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-07T13:27:56,919 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-07T13:27:56,919 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-07T13:27:56,919 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-07T13:27:56,919 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:33821 2024-12-07T13:27:56,920 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:33821 connecting to ZooKeeper ensemble=127.0.0.1:60064 2024-12-07T13:27:56,921 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T13:27:56,922 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T13:27:56,945 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:338210x0, quorum=127.0.0.1:60064, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-07T13:27:56,946 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:33821-0x1000076380a0001 connected 2024-12-07T13:27:56,946 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33821-0x1000076380a0001, quorum=127.0.0.1:60064, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-07T13:27:56,946 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-07T13:27:56,947 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-07T13:27:56,948 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33821-0x1000076380a0001, quorum=127.0.0.1:60064, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-07T13:27:56,949 DEBUG [Time-limited test {}] 
zookeeper.ZKUtil(113): regionserver:33821-0x1000076380a0001, quorum=127.0.0.1:60064, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-07T13:27:56,950 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=33821 2024-12-07T13:27:56,950 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=33821 2024-12-07T13:27:56,950 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=33821 2024-12-07T13:27:56,951 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=33821 2024-12-07T13:27:56,951 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=33821 2024-12-07T13:27:56,965 DEBUG [M:0;c7c455b68129:41799 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;c7c455b68129:41799 2024-12-07T13:27:56,965 INFO [master/c7c455b68129:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/c7c455b68129,41799,1733578076500 2024-12-07T13:27:56,977 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33821-0x1000076380a0001, quorum=127.0.0.1:60064, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-07T13:27:56,977 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41799-0x1000076380a0000, quorum=127.0.0.1:60064, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-07T13:27:56,977 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:41799-0x1000076380a0000, quorum=127.0.0.1:60064, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/c7c455b68129,41799,1733578076500 2024-12-07T13:27:56,987 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33821-0x1000076380a0001, quorum=127.0.0.1:60064, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-07T13:27:56,987 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41799-0x1000076380a0000, quorum=127.0.0.1:60064, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T13:27:56,987 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33821-0x1000076380a0001, quorum=127.0.0.1:60064, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T13:27:56,988 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:41799-0x1000076380a0000, quorum=127.0.0.1:60064, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-07T13:27:56,989 INFO [master/c7c455b68129:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/c7c455b68129,41799,1733578076500 from backup master directory 2024-12-07T13:27:56,998 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33821-0x1000076380a0001, quorum=127.0.0.1:60064, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, 
path=/hbase/backup-masters 2024-12-07T13:27:56,998 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41799-0x1000076380a0000, quorum=127.0.0.1:60064, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/c7c455b68129,41799,1733578076500 2024-12-07T13:27:56,998 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41799-0x1000076380a0000, quorum=127.0.0.1:60064, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-07T13:27:56,998 WARN [master/c7c455b68129:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-07T13:27:56,998 INFO [master/c7c455b68129:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=c7c455b68129,41799,1733578076500 2024-12-07T13:27:57,005 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:45737/user/jenkins/test-data/6c56ff26-3bd7-d39e-4fc2-e485aa42ab7c/hbase.id] with ID: 5f2c492f-d1f7-428b-a985-9bac090a5682 2024-12-07T13:27:57,005 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:45737/user/jenkins/test-data/6c56ff26-3bd7-d39e-4fc2-e485aa42ab7c/.tmp/hbase.id 2024-12-07T13:27:57,013 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45873 is added to blk_1073741826_1002 (size=42) 2024-12-07T13:27:57,014 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39125 is added to blk_1073741826_1002 (size=42) 2024-12-07T13:27:57,015 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:45737/user/jenkins/test-data/6c56ff26-3bd7-d39e-4fc2-e485aa42ab7c/.tmp/hbase.id]:[hdfs://localhost:45737/user/jenkins/test-data/6c56ff26-3bd7-d39e-4fc2-e485aa42ab7c/hbase.id] 2024-12-07T13:27:57,031 INFO [master/c7c455b68129:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T13:27:57,031 INFO [master/c7c455b68129:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-12-07T13:27:57,033 INFO [master/c7c455b68129:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 
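The FSUtils(620/625/634) messages above record the cluster ID being written first to a .tmp location and then moved to its target hbase.id path. Below is a minimal sketch of that write-then-rename pattern using the standard Hadoop FileSystem API; the class and method names are illustrative only and this is not HBase's actual implementation.

    import java.io.IOException;
    import java.nio.charset.StandardCharsets;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    // Illustrative sketch: write an ID file to a temporary path, then rename it
    // into place, mirroring the FSUtils messages logged above.
    public final class ClusterIdFileSketch {
      private ClusterIdFileSketch() {}

      public static void writeClusterId(FileSystem fs, Path rootDir, String clusterId)
          throws IOException {
        Path target = new Path(rootDir, "hbase.id");     // final location, as in the log
        Path tmp = new Path(rootDir, ".tmp/hbase.id");   // temporary location, as in the log
        try (FSDataOutputStream out = fs.create(tmp, true)) {
          out.write(clusterId.getBytes(StandardCharsets.UTF_8));
        }
        // Rename publishes the file only after it is fully written, so a reader
        // sees either a complete hbase.id or no file at all.
        if (!fs.rename(tmp, target)) {
          throw new IOException("Failed to rename " + tmp + " to " + target);
        }
      }
    }

The usual reason for this pattern, and presumably why the log reports a separate temporary location before the move, is that the rename step avoids ever exposing a partially written ID file.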
2024-12-07T13:27:57,040 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41799-0x1000076380a0000, quorum=127.0.0.1:60064, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T13:27:57,040 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33821-0x1000076380a0001, quorum=127.0.0.1:60064, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T13:27:57,046 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39125 is added to blk_1073741827_1003 (size=196) 2024-12-07T13:27:57,046 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45873 is added to blk_1073741827_1003 (size=196) 2024-12-07T13:27:57,047 INFO [master/c7c455b68129:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-07T13:27:57,048 INFO [master/c7c455b68129:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-07T13:27:57,048 INFO [master/c7c455b68129:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-07T13:27:57,055 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45873 is added to blk_1073741828_1004 (size=1189) 2024-12-07T13:27:57,055 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39125 is added to blk_1073741828_1004 (size=1189) 2024-12-07T13:27:57,056 INFO [master/c7c455b68129:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:45737/user/jenkins/test-data/6c56ff26-3bd7-d39e-4fc2-e485aa42ab7c/MasterData/data/master/store 2024-12-07T13:27:57,062 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45873 is added to blk_1073741829_1005 (size=34) 2024-12-07T13:27:57,062 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39125 is added to blk_1073741829_1005 (size=34) 2024-12-07T13:27:57,063 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T13:27:57,063 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-07T13:27:57,063 INFO [master/c7c455b68129:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-07T13:27:57,063 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-07T13:27:57,063 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-07T13:27:57,063 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-07T13:27:57,063 INFO [master/c7c455b68129:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
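The 'master:store' descriptor logged above lists four column families (info, proc, rs, state) with their versions, bloom filters, block encodings and block sizes. As a rough illustration only, the same attributes can be expressed with the public ColumnFamilyDescriptorBuilder/TableDescriptorBuilder API as in the sketch below; the class and table name are made up, and this is not how MasterRegion actually constructs its descriptor.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    // Illustrative sketch of the column-family attributes printed for master:store.
    public final class StoreDescriptorSketch {
      private StoreDescriptorSketch() {}

      public static TableDescriptor build() {
        return TableDescriptorBuilder.newBuilder(TableName.valueOf("demo_store")) // hypothetical name
            // 'info': 3 versions, in-memory, ROWCOL bloom, ROW_INDEX_V1 encoding, 8 KB blocks.
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
                .setMaxVersions(3)
                .setInMemory(true)
                .setBloomFilterType(BloomType.ROWCOL)
                .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
                .setBlocksize(8 * 1024)
                .build())
            // 'proc', 'rs', 'state': single version, ROW bloom, 64 KB blocks.
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("proc"))
                .setMaxVersions(1).setBloomFilterType(BloomType.ROW).setBlocksize(64 * 1024).build())
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("rs"))
                .setMaxVersions(1).setBloomFilterType(BloomType.ROW).setBlocksize(64 * 1024).build())
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("state"))
                .setMaxVersions(1).setBloomFilterType(BloomType.ROW).setBlocksize(64 * 1024).build())
            .build();
      }
    }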
2024-12-07T13:27:57,063 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733578077063Disabling compacts and flushes for region at 1733578077063Disabling writes for close at 1733578077063Writing region close event to WAL at 1733578077063Closed at 1733578077063 2024-12-07T13:27:57,064 WARN [master/c7c455b68129:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:45737/user/jenkins/test-data/6c56ff26-3bd7-d39e-4fc2-e485aa42ab7c/MasterData/data/master/store/.initializing 2024-12-07T13:27:57,064 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:45737/user/jenkins/test-data/6c56ff26-3bd7-d39e-4fc2-e485aa42ab7c/MasterData/WALs/c7c455b68129,41799,1733578076500 2024-12-07T13:27:57,066 INFO [master/c7c455b68129:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=c7c455b68129%2C41799%2C1733578076500, suffix=, logDir=hdfs://localhost:45737/user/jenkins/test-data/6c56ff26-3bd7-d39e-4fc2-e485aa42ab7c/MasterData/WALs/c7c455b68129,41799,1733578076500, archiveDir=hdfs://localhost:45737/user/jenkins/test-data/6c56ff26-3bd7-d39e-4fc2-e485aa42ab7c/MasterData/oldWALs, maxLogs=10 2024-12-07T13:27:57,066 INFO [master/c7c455b68129:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor c7c455b68129%2C41799%2C1733578076500.1733578077066 2024-12-07T13:27:57,071 INFO [master/c7c455b68129:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/6c56ff26-3bd7-d39e-4fc2-e485aa42ab7c/MasterData/WALs/c7c455b68129,41799,1733578076500/c7c455b68129%2C41799%2C1733578076500.1733578077066 2024-12-07T13:27:57,072 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34733:34733),(127.0.0.1/127.0.0.1:32791:32791)] 2024-12-07T13:27:57,072 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-07T13:27:57,073 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T13:27:57,073 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-07T13:27:57,073 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-07T13:27:57,074 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-07T13:27:57,075 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-07T13:27:57,075 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T13:27:57,075 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T13:27:57,076 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-07T13:27:57,077 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-07T13:27:57,077 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T13:27:57,077 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T13:27:57,077 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-07T13:27:57,078 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-07T13:27:57,078 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T13:27:57,078 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T13:27:57,078 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-07T13:27:57,079 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-07T13:27:57,079 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T13:27:57,080 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T13:27:57,080 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-07T13:27:57,080 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45737/user/jenkins/test-data/6c56ff26-3bd7-d39e-4fc2-e485aa42ab7c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-07T13:27:57,080 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45737/user/jenkins/test-data/6c56ff26-3bd7-d39e-4fc2-e485aa42ab7c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-07T13:27:57,082 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-07T13:27:57,082 DEBUG [master/c7c455b68129:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-07T13:27:57,082 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-07T13:27:57,083 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-07T13:27:57,085 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45737/user/jenkins/test-data/6c56ff26-3bd7-d39e-4fc2-e485aa42ab7c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-07T13:27:57,085 INFO [master/c7c455b68129:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=692759, jitterRate=-0.11911244690418243}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-07T13:27:57,085 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733578077073Initializing all the Stores at 1733578077073Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733578077073Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733578077074 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733578077074Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733578077074Cleaning up temporary data from old regions at 1733578077082 (+8 ms)Region opened successfully at 1733578077085 (+3 ms) 2024-12-07T13:27:57,085 INFO [master/c7c455b68129:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-07T13:27:57,087 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5c9b2525, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=c7c455b68129/172.17.0.3:0 2024-12-07T13:27:57,088 INFO [master/c7c455b68129:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-12-07T13:27:57,088 INFO [master/c7c455b68129:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-07T13:27:57,088 INFO [master/c7c455b68129:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-07T13:27:57,088 INFO [master/c7c455b68129:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-07T13:27:57,089 INFO [master/c7c455b68129:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-12-07T13:27:57,089 INFO [master/c7c455b68129:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-12-07T13:27:57,089 INFO [master/c7c455b68129:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-07T13:27:57,091 INFO [master/c7c455b68129:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-07T13:27:57,091 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41799-0x1000076380a0000, quorum=127.0.0.1:60064, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-07T13:27:57,103 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-07T13:27:57,103 INFO [master/c7c455b68129:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-07T13:27:57,104 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41799-0x1000076380a0000, quorum=127.0.0.1:60064, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-07T13:27:57,114 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-07T13:27:57,114 INFO [master/c7c455b68129:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-07T13:27:57,115 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41799-0x1000076380a0000, quorum=127.0.0.1:60064, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-07T13:27:57,124 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-07T13:27:57,125 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41799-0x1000076380a0000, quorum=127.0.0.1:60064, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-07T13:27:57,134 DEBUG 
[master/c7c455b68129:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-07T13:27:57,137 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41799-0x1000076380a0000, quorum=127.0.0.1:60064, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-07T13:27:57,145 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-07T13:27:57,156 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33821-0x1000076380a0001, quorum=127.0.0.1:60064, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-07T13:27:57,156 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41799-0x1000076380a0000, quorum=127.0.0.1:60064, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-07T13:27:57,156 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33821-0x1000076380a0001, quorum=127.0.0.1:60064, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T13:27:57,156 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41799-0x1000076380a0000, quorum=127.0.0.1:60064, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T13:27:57,156 INFO [master/c7c455b68129:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=c7c455b68129,41799,1733578076500, sessionid=0x1000076380a0000, setting cluster-up flag (Was=false) 2024-12-07T13:27:57,177 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41799-0x1000076380a0000, quorum=127.0.0.1:60064, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T13:27:57,177 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33821-0x1000076380a0001, quorum=127.0.0.1:60064, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T13:27:57,208 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-07T13:27:57,210 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=c7c455b68129,41799,1733578076500 2024-12-07T13:27:57,230 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33821-0x1000076380a0001, quorum=127.0.0.1:60064, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T13:27:57,230 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41799-0x1000076380a0000, quorum=127.0.0.1:60064, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T13:27:57,262 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-07T13:27:57,264 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=c7c455b68129,41799,1733578076500 2024-12-07T13:27:57,266 INFO [master/c7c455b68129:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:45737/user/jenkins/test-data/6c56ff26-3bd7-d39e-4fc2-e485aa42ab7c/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-07T13:27:57,268 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-07T13:27:57,268 INFO [master/c7c455b68129:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-07T13:27:57,268 INFO [master/c7c455b68129:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-12-07T13:27:57,269 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: c7c455b68129,41799,1733578076500 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-07T13:27:57,271 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/c7c455b68129:0, corePoolSize=5, maxPoolSize=5 2024-12-07T13:27:57,271 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/c7c455b68129:0, corePoolSize=5, maxPoolSize=5 2024-12-07T13:27:57,271 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/c7c455b68129:0, corePoolSize=5, maxPoolSize=5 2024-12-07T13:27:57,271 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/c7c455b68129:0, corePoolSize=5, maxPoolSize=5 2024-12-07T13:27:57,271 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/c7c455b68129:0, corePoolSize=10, maxPoolSize=10 2024-12-07T13:27:57,271 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/c7c455b68129:0, corePoolSize=1, maxPoolSize=1 2024-12-07T13:27:57,271 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/c7c455b68129:0, corePoolSize=2, maxPoolSize=2 2024-12-07T13:27:57,271 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/c7c455b68129:0, corePoolSize=1, 
maxPoolSize=1 2024-12-07T13:27:57,272 INFO [master/c7c455b68129:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733578107272 2024-12-07T13:27:57,272 INFO [master/c7c455b68129:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-07T13:27:57,272 INFO [master/c7c455b68129:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-07T13:27:57,272 INFO [master/c7c455b68129:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-07T13:27:57,272 INFO [master/c7c455b68129:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-07T13:27:57,273 INFO [master/c7c455b68129:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-07T13:27:57,273 INFO [master/c7c455b68129:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-07T13:27:57,273 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-07T13:27:57,273 INFO [master/c7c455b68129:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-07T13:27:57,273 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-07T13:27:57,273 INFO [master/c7c455b68129:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-07T13:27:57,273 INFO [master/c7c455b68129:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-07T13:27:57,273 INFO [master/c7c455b68129:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-07T13:27:57,273 INFO [master/c7c455b68129:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-07T13:27:57,273 INFO [master/c7c455b68129:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-07T13:27:57,274 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/c7c455b68129:0:becomeActiveMaster-HFileCleaner.large.0-1733578077274,5,FailOnTimeoutGroup] 2024-12-07T13:27:57,274 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T13:27:57,274 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/c7c455b68129:0:becomeActiveMaster-HFileCleaner.small.0-1733578077274,5,FailOnTimeoutGroup] 2024-12-07T13:27:57,274 INFO [master/c7c455b68129:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 
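The "Unable to get data of znode ... because node does not exist (not necessarily an error)" lines above are the master probing optional switch znodes (/hbase/balancer, /hbase/normalizer, /hbase/switch/*); an absent node simply means the default setting applies. A minimal sketch of such a probe with the plain ZooKeeper client follows; the quorum string is taken from the log, while the session timeout and class name are assumptions.

```java
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.ZooKeeper;
import org.apache.zookeeper.data.Stat;

public final class ZnodeProbeSketch {

  /**
   * Reads the content of an optional znode such as /hbase/balancer.
   * Returns null when the node does not exist, which callers treat as
   * "use the default", mirroring the "not necessarily an error" DEBUG lines above.
   */
  static byte[] readOptionalZnode(ZooKeeper zk, String path)
      throws KeeperException, InterruptedException {
    Stat stat = zk.exists(path, false);   // null Stat => node absent
    if (stat == null) {
      return null;
    }
    return zk.getData(path, false, stat);
  }

  public static void main(String[] args) throws Exception {
    // Quorum string taken from the log; the 30 s session timeout is an assumption.
    ZooKeeper zk = new ZooKeeper("127.0.0.1:60064", 30_000, (WatchedEvent e) -> { });
    byte[] data = readOptionalZnode(zk, "/hbase/balancer");
    System.out.println(data == null ? "znode absent, default applies" : new String(data));
    zk.close();
  }
}
```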
2024-12-07T13:27:57,274 INFO [master/c7c455b68129:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-07T13:27:57,274 INFO [master/c7c455b68129:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-07T13:27:57,274 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-07T13:27:57,274 INFO [master/c7c455b68129:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
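The StochasticLoadBalancer line above lists its cost functions and reports the sum of their multipliers. Roughly, the balancer scores a candidate cluster layout as a multiplier-weighted sum of per-function costs; the sketch below shows only that aggregation, with made-up multipliers and costs, and is not the actual StochasticLoadBalancer implementation.

```java
import java.util.LinkedHashMap;
import java.util.Map;

public final class WeightedCostSketch {

  /**
   * Aggregates per-function costs (each scaled to [0,1]) into one score:
   * score = sum(multiplier_i * cost_i). A rough sketch of the idea behind the
   * "sum of multiplier of cost functions" line above, not the real balancer code.
   */
  static double weightedCost(Map<String, double[]> functions) {
    double total = 0.0;
    for (double[] multiplierAndCost : functions.values()) {
      total += multiplierAndCost[0] * multiplierAndCost[1];
    }
    return total;
  }

  public static void main(String[] args) {
    // Multipliers and costs below are illustrative values, not HBase defaults.
    Map<String, double[]> functions = new LinkedHashMap<>();
    functions.put("RegionCountSkewCostFunction", new double[] { 500.0, 0.10 });
    functions.put("MoveCostFunction", new double[] { 7.0, 0.02 });
    functions.put("ServerLocalityCostFunction", new double[] { 25.0, 0.30 });
    System.out.printf("weighted cost = %.3f%n", weightedCost(functions));
  }
}
```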
2024-12-07T13:27:57,279 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39125 is added to blk_1073741831_1007 (size=1321) 2024-12-07T13:27:57,279 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45873 is added to blk_1073741831_1007 (size=1321) 2024-12-07T13:27:57,280 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:45737/user/jenkins/test-data/6c56ff26-3bd7-d39e-4fc2-e485aa42ab7c/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-07T13:27:57,281 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:45737/user/jenkins/test-data/6c56ff26-3bd7-d39e-4fc2-e485aa42ab7c 2024-12-07T13:27:57,286 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39125 is added to blk_1073741832_1008 (size=32) 2024-12-07T13:27:57,286 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45873 is added to blk_1073741832_1008 (size=32) 2024-12-07T13:27:57,287 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T13:27:57,288 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-07T13:27:57,289 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; 
off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-07T13:27:57,289 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T13:27:57,289 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T13:27:57,289 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-07T13:27:57,290 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-07T13:27:57,290 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T13:27:57,291 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T13:27:57,291 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-07T13:27:57,292 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-07T13:27:57,292 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T13:27:57,292 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T13:27:57,292 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-07T13:27:57,293 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-07T13:27:57,293 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T13:27:57,293 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T13:27:57,293 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-07T13:27:57,294 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45737/user/jenkins/test-data/6c56ff26-3bd7-d39e-4fc2-e485aa42ab7c/data/hbase/meta/1588230740 2024-12-07T13:27:57,294 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45737/user/jenkins/test-data/6c56ff26-3bd7-d39e-4fc2-e485aa42ab7c/data/hbase/meta/1588230740 2024-12-07T13:27:57,295 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-07T13:27:57,296 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-07T13:27:57,296 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
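The two FlushLargeStoresPolicy lines in this log (32.0 M for master:store, 16.0 M just above for hbase:meta) show the fallback taken when hbase.hregion.percolumnfamilyflush.size.lower.bound is unset: the region's memstore flush size divided by its number of column families. A small sketch of that arithmetic using the logged values; the helper name is illustrative.

```java
public final class FlushLowerBoundSketch {

  /**
   * Fallback used when hbase.hregion.percolumnfamilyflush.size.lower.bound is not set:
   * divide the region's memstore flush size evenly across its column families.
   * (Method name is illustrative; the real logic lives in FlushLargeStoresPolicy.)
   */
  static long perFamilyLowerBound(long memstoreFlushSize, int numFamilies) {
    return memstoreFlushSize / numFamilies;
  }

  public static void main(String[] args) {
    // master:store -> 128 MB flush size over 4 families (info, proc, rs, state) => 32 MB
    System.out.println(perFamilyLowerBound(134_217_728L, 4));   // 33554432
    // hbase:meta -> the logged 16 MB bound over 4 families implies a 64 MB flush size
    System.out.println(perFamilyLowerBound(67_108_864L, 4));    // 16777216
  }
}
```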
2024-12-07T13:27:57,297 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-07T13:27:57,299 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45737/user/jenkins/test-data/6c56ff26-3bd7-d39e-4fc2-e485aa42ab7c/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-07T13:27:57,299 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=878043, jitterRate=0.11648991703987122}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-07T13:27:57,299 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733578077287Initializing all the Stores at 1733578077287Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733578077287Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733578077288 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733578077288Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733578077288Cleaning up temporary data from old regions at 1733578077296 (+8 ms)Region opened successfully at 1733578077299 (+3 ms) 2024-12-07T13:27:57,299 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-07T13:27:57,299 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-07T13:27:57,300 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-07T13:27:57,300 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-07T13:27:57,300 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-07T13:27:57,300 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-07T13:27:57,300 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733578077299Disabling compacts and flushes for region at 1733578077299Disabling writes for close at 1733578077300 (+1 ms)Writing 
region close event to WAL at 1733578077300Closed at 1733578077300 2024-12-07T13:27:57,301 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-07T13:27:57,301 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-07T13:27:57,301 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-07T13:27:57,302 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-07T13:27:57,303 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-07T13:27:57,354 INFO [RS:0;c7c455b68129:33821 {}] regionserver.HRegionServer(746): ClusterId : 5f2c492f-d1f7-428b-a985-9bac090a5682 2024-12-07T13:27:57,354 DEBUG [RS:0;c7c455b68129:33821 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-07T13:27:57,436 DEBUG [RS:0;c7c455b68129:33821 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-07T13:27:57,436 DEBUG [RS:0;c7c455b68129:33821 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-07T13:27:57,453 WARN [c7c455b68129:41799 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-12-07T13:27:57,463 DEBUG [RS:0;c7c455b68129:33821 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-07T13:27:57,464 DEBUG [RS:0;c7c455b68129:33821 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1611f9ff, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=c7c455b68129/172.17.0.3:0 2024-12-07T13:27:57,478 DEBUG [RS:0;c7c455b68129:33821 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;c7c455b68129:33821 2024-12-07T13:27:57,478 INFO [RS:0;c7c455b68129:33821 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-07T13:27:57,478 INFO [RS:0;c7c455b68129:33821 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-07T13:27:57,478 DEBUG [RS:0;c7c455b68129:33821 {}] regionserver.HRegionServer(832): About to register with Master. 
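The "Opened 1588230740" line above reports a SteppingSplitPolicy wrapping IncreasingToUpperBoundRegionSplitPolicy with initialSize=16384 and a jittered desiredMaxFileSize=878043. That policy is generally described as growing the split threshold with the cube of the table's region count on the server, capped at the max file size; the sketch below restates that rule in simplified form using the logged numbers and is not the HBase class itself.

```java
public final class SplitSizeSketch {

  /**
   * Simplified restatement of the IncreasingToUpperBoundRegionSplitPolicy sizing rule:
   * threshold = min(desiredMaxFileSize, initialSize * regionCount^3).
   * The constants come from the log line above; the helper itself is illustrative.
   */
  static long splitThreshold(long initialSize, long desiredMaxFileSize, int regionCount) {
    if (regionCount <= 0) {
      return desiredMaxFileSize;
    }
    long cubed = initialSize * regionCount * regionCount * regionCount;
    return Math.min(desiredMaxFileSize, cubed);
  }

  public static void main(String[] args) {
    long initialSize = 16_384L;   // from the hbase:meta split policy logged above
    long desiredMax = 878_043L;   // jittered max file size from the same line
    for (int regions = 1; regions <= 4; regions++) {
      System.out.printf("regions=%d -> threshold=%d%n",
          regions, splitThreshold(initialSize, desiredMax, regions));
    }
  }
}
```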
2024-12-07T13:27:57,479 INFO [RS:0;c7c455b68129:33821 {}] regionserver.HRegionServer(2659): reportForDuty to master=c7c455b68129,41799,1733578076500 with port=33821, startcode=1733578076918 2024-12-07T13:27:57,479 DEBUG [RS:0;c7c455b68129:33821 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-07T13:27:57,481 INFO [HMaster-EventLoopGroup-16-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:57745, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.7 (auth:SIMPLE), service=RegionServerStatusService 2024-12-07T13:27:57,481 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41799 {}] master.ServerManager(363): Checking decommissioned status of RegionServer c7c455b68129,33821,1733578076918 2024-12-07T13:27:57,481 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41799 {}] master.ServerManager(517): Registering regionserver=c7c455b68129,33821,1733578076918 2024-12-07T13:27:57,483 DEBUG [RS:0;c7c455b68129:33821 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:45737/user/jenkins/test-data/6c56ff26-3bd7-d39e-4fc2-e485aa42ab7c 2024-12-07T13:27:57,483 DEBUG [RS:0;c7c455b68129:33821 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:45737 2024-12-07T13:27:57,483 DEBUG [RS:0;c7c455b68129:33821 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-07T13:27:57,492 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41799-0x1000076380a0000, quorum=127.0.0.1:60064, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-07T13:27:57,493 DEBUG [RS:0;c7c455b68129:33821 {}] zookeeper.ZKUtil(111): regionserver:33821-0x1000076380a0001, quorum=127.0.0.1:60064, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/c7c455b68129,33821,1733578076918 2024-12-07T13:27:57,493 WARN [RS:0;c7c455b68129:33821 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-07T13:27:57,493 INFO [RS:0;c7c455b68129:33821 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-07T13:27:57,493 DEBUG [RS:0;c7c455b68129:33821 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:45737/user/jenkins/test-data/6c56ff26-3bd7-d39e-4fc2-e485aa42ab7c/WALs/c7c455b68129,33821,1733578076918 2024-12-07T13:27:57,493 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [c7c455b68129,33821,1733578076918] 2024-12-07T13:27:57,496 INFO [RS:0;c7c455b68129:33821 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-07T13:27:57,497 INFO [RS:0;c7c455b68129:33821 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-07T13:27:57,497 INFO [RS:0;c7c455b68129:33821 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-07T13:27:57,497 INFO [RS:0;c7c455b68129:33821 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
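The MemStoreFlusher line above reports globalMemStoreLimit=880 M with a low-water mark of 836 M. The low mark is the global limit scaled by a lower-limit fraction, and 836 is exactly 95% of 880, which matches the commonly documented 0.95 default for hbase.regionserver.global.memstore.size.lower.limit (property name cited from memory, so verify against your HBase version). A tiny sketch of that arithmetic:

```java
public final class MemstoreLimitSketch {

  /** lowMark = globalLimit * lowerLimitFraction (fraction assumed to be the 0.95 default). */
  static long lowWaterMark(long globalLimitBytes, double lowerLimitFraction) {
    return (long) (globalLimitBytes * lowerLimitFraction);
  }

  public static void main(String[] args) {
    long globalLimit = 880L * 1024 * 1024;            // 880 MB, from the log line above
    long lowMark = lowWaterMark(globalLimit, 0.95);   // 0.95 is the assumed default fraction
    System.out.println(lowMark / (1024 * 1024) + " MB"); // prints 836 MB
  }
}
```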
2024-12-07T13:27:57,497 INFO [RS:0;c7c455b68129:33821 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-07T13:27:57,498 INFO [RS:0;c7c455b68129:33821 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-07T13:27:57,498 INFO [RS:0;c7c455b68129:33821 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-07T13:27:57,498 DEBUG [RS:0;c7c455b68129:33821 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/c7c455b68129:0, corePoolSize=1, maxPoolSize=1 2024-12-07T13:27:57,498 DEBUG [RS:0;c7c455b68129:33821 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/c7c455b68129:0, corePoolSize=1, maxPoolSize=1 2024-12-07T13:27:57,498 DEBUG [RS:0;c7c455b68129:33821 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/c7c455b68129:0, corePoolSize=1, maxPoolSize=1 2024-12-07T13:27:57,498 DEBUG [RS:0;c7c455b68129:33821 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/c7c455b68129:0, corePoolSize=1, maxPoolSize=1 2024-12-07T13:27:57,498 DEBUG [RS:0;c7c455b68129:33821 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/c7c455b68129:0, corePoolSize=1, maxPoolSize=1 2024-12-07T13:27:57,498 DEBUG [RS:0;c7c455b68129:33821 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/c7c455b68129:0, corePoolSize=2, maxPoolSize=2 2024-12-07T13:27:57,499 DEBUG [RS:0;c7c455b68129:33821 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/c7c455b68129:0, corePoolSize=1, maxPoolSize=1 2024-12-07T13:27:57,499 DEBUG [RS:0;c7c455b68129:33821 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/c7c455b68129:0, corePoolSize=1, maxPoolSize=1 2024-12-07T13:27:57,499 DEBUG [RS:0;c7c455b68129:33821 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/c7c455b68129:0, corePoolSize=1, maxPoolSize=1 2024-12-07T13:27:57,499 DEBUG [RS:0;c7c455b68129:33821 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/c7c455b68129:0, corePoolSize=1, maxPoolSize=1 2024-12-07T13:27:57,499 DEBUG [RS:0;c7c455b68129:33821 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/c7c455b68129:0, corePoolSize=1, maxPoolSize=1 2024-12-07T13:27:57,499 DEBUG [RS:0;c7c455b68129:33821 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/c7c455b68129:0, corePoolSize=1, maxPoolSize=1 2024-12-07T13:27:57,499 DEBUG [RS:0;c7c455b68129:33821 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/c7c455b68129:0, corePoolSize=3, maxPoolSize=3 2024-12-07T13:27:57,499 DEBUG [RS:0;c7c455b68129:33821 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/c7c455b68129:0, corePoolSize=3, maxPoolSize=3 2024-12-07T13:27:57,499 INFO [RS:0;c7c455b68129:33821 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
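Each "Chore ScheduledChore name=..., period=..., unit=MILLISECONDS is enabled." line above registers a periodic task with the region server's ChoreService. As a generic stand-in for that mechanism (not HBase's ChoreService/ScheduledChore API), the pattern is a fixed-rate scheduled task, sketched here with java.util.concurrent; the chore bodies are placeholders.

```java
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

public final class ChorePatternSketch {

  public static void main(String[] args) throws InterruptedException {
    // Generic stand-in for HBase's ChoreService: one scheduler thread running named chores.
    ScheduledExecutorService chorePool = Executors.newSingleThreadScheduledExecutor();

    // Mirrors "Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS":
    // the body here is a placeholder, not the real compaction check.
    chorePool.scheduleAtFixedRate(
        () -> System.out.println("CompactionChecker tick"), 0, 1000, TimeUnit.MILLISECONDS);

    // Mirrors "Chore ScheduledChore name=MemstoreFlusherChore, period=1000, ...".
    chorePool.scheduleAtFixedRate(
        () -> System.out.println("MemstoreFlusherChore tick"), 0, 1000, TimeUnit.MILLISECONDS);

    TimeUnit.SECONDS.sleep(3);   // let a few ticks run, then shut down
    chorePool.shutdownNow();
  }
}
```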
2024-12-07T13:27:57,499 INFO [RS:0;c7c455b68129:33821 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-07T13:27:57,499 INFO [RS:0;c7c455b68129:33821 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-07T13:27:57,499 INFO [RS:0;c7c455b68129:33821 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-07T13:27:57,499 INFO [RS:0;c7c455b68129:33821 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-07T13:27:57,499 INFO [RS:0;c7c455b68129:33821 {}] hbase.ChoreService(168): Chore ScheduledChore name=c7c455b68129,33821,1733578076918-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-07T13:27:57,511 INFO [RS:0;c7c455b68129:33821 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-07T13:27:57,511 INFO [RS:0;c7c455b68129:33821 {}] hbase.ChoreService(168): Chore ScheduledChore name=c7c455b68129,33821,1733578076918-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-07T13:27:57,511 INFO [RS:0;c7c455b68129:33821 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-07T13:27:57,511 INFO [RS:0;c7c455b68129:33821 {}] regionserver.Replication(171): c7c455b68129,33821,1733578076918 started 2024-12-07T13:27:57,522 INFO [RS:0;c7c455b68129:33821 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-07T13:27:57,522 INFO [RS:0;c7c455b68129:33821 {}] regionserver.HRegionServer(1482): Serving as c7c455b68129,33821,1733578076918, RpcServer on c7c455b68129/172.17.0.3:33821, sessionid=0x1000076380a0001 2024-12-07T13:27:57,522 DEBUG [RS:0;c7c455b68129:33821 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-07T13:27:57,522 DEBUG [RS:0;c7c455b68129:33821 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager c7c455b68129,33821,1733578076918 2024-12-07T13:27:57,522 DEBUG [RS:0;c7c455b68129:33821 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'c7c455b68129,33821,1733578076918' 2024-12-07T13:27:57,522 DEBUG [RS:0;c7c455b68129:33821 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-07T13:27:57,523 DEBUG [RS:0;c7c455b68129:33821 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-07T13:27:57,523 DEBUG [RS:0;c7c455b68129:33821 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-07T13:27:57,523 DEBUG [RS:0;c7c455b68129:33821 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-07T13:27:57,523 DEBUG [RS:0;c7c455b68129:33821 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager c7c455b68129,33821,1733578076918 2024-12-07T13:27:57,523 DEBUG [RS:0;c7c455b68129:33821 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'c7c455b68129,33821,1733578076918' 2024-12-07T13:27:57,523 DEBUG [RS:0;c7c455b68129:33821 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-07T13:27:57,524 DEBUG 
[RS:0;c7c455b68129:33821 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-07T13:27:57,524 DEBUG [RS:0;c7c455b68129:33821 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-07T13:27:57,524 INFO [RS:0;c7c455b68129:33821 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-07T13:27:57,524 INFO [RS:0;c7c455b68129:33821 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-07T13:27:57,628 INFO [RS:0;c7c455b68129:33821 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=c7c455b68129%2C33821%2C1733578076918, suffix=, logDir=hdfs://localhost:45737/user/jenkins/test-data/6c56ff26-3bd7-d39e-4fc2-e485aa42ab7c/WALs/c7c455b68129,33821,1733578076918, archiveDir=hdfs://localhost:45737/user/jenkins/test-data/6c56ff26-3bd7-d39e-4fc2-e485aa42ab7c/oldWALs, maxLogs=32 2024-12-07T13:27:57,629 INFO [RS:0;c7c455b68129:33821 {}] monitor.StreamSlowMonitor(122): New stream slow monitor c7c455b68129%2C33821%2C1733578076918.1733578077629 2024-12-07T13:27:57,637 INFO [RS:0;c7c455b68129:33821 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/6c56ff26-3bd7-d39e-4fc2-e485aa42ab7c/WALs/c7c455b68129,33821,1733578076918/c7c455b68129%2C33821%2C1733578076918.1733578077629 2024-12-07T13:27:57,639 DEBUG [RS:0;c7c455b68129:33821 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34733:34733),(127.0.0.1/127.0.0.1:32791:32791)] 2024-12-07T13:27:57,643 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,43699,1733577893481/c7c455b68129%2C43699%2C1733577893481.meta.1733577894498.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-07T13:27:57,643 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,42863,1733577894737/c7c455b68129%2C42863%2C1733577894737.1733577894972 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-07T13:27:57,703 DEBUG [c7c455b68129:41799 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-12-07T13:27:57,704 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=c7c455b68129,33821,1733578076918 2024-12-07T13:27:57,707 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as c7c455b68129,33821,1733578076918, state=OPENING 2024-12-07T13:27:57,714 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-07T13:27:57,725 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41799-0x1000076380a0000, quorum=127.0.0.1:60064, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T13:27:57,725 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33821-0x1000076380a0001, quorum=127.0.0.1:60064, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T13:27:57,726 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-07T13:27:57,726 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=c7c455b68129,33821,1733578076918}] 2024-12-07T13:27:57,726 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-07T13:27:57,726 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-07T13:27:57,882 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-07T13:27:57,886 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:60967, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-07T13:27:57,891 INFO [RS_OPEN_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-07T13:27:57,891 INFO [RS_OPEN_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-07T13:27:57,894 INFO [RS_OPEN_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=c7c455b68129%2C33821%2C1733578076918.meta, suffix=.meta, logDir=hdfs://localhost:45737/user/jenkins/test-data/6c56ff26-3bd7-d39e-4fc2-e485aa42ab7c/WALs/c7c455b68129,33821,1733578076918, archiveDir=hdfs://localhost:45737/user/jenkins/test-data/6c56ff26-3bd7-d39e-4fc2-e485aa42ab7c/oldWALs, maxLogs=32 2024-12-07T13:27:57,895 INFO [RS_OPEN_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor c7c455b68129%2C33821%2C1733578076918.meta.1733578077894.meta 2024-12-07T13:27:57,901 INFO 
[RS_OPEN_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/6c56ff26-3bd7-d39e-4fc2-e485aa42ab7c/WALs/c7c455b68129,33821,1733578076918/c7c455b68129%2C33821%2C1733578076918.meta.1733578077894.meta 2024-12-07T13:27:57,904 DEBUG [RS_OPEN_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:32791:32791),(127.0.0.1/127.0.0.1:34733:34733)] 2024-12-07T13:27:57,907 DEBUG [RS_OPEN_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-07T13:27:57,907 DEBUG [RS_OPEN_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-07T13:27:57,907 DEBUG [RS_OPEN_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-07T13:27:57,907 INFO [RS_OPEN_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-12-07T13:27:57,907 DEBUG [RS_OPEN_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-07T13:27:57,907 DEBUG [RS_OPEN_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T13:27:57,907 DEBUG [RS_OPEN_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-07T13:27:57,907 DEBUG [RS_OPEN_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-07T13:27:57,909 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-07T13:27:57,909 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-07T13:27:57,909 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T13:27:57,910 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T13:27:57,910 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-07T13:27:57,910 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-07T13:27:57,910 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T13:27:57,911 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T13:27:57,911 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-07T13:27:57,911 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-07T13:27:57,911 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T13:27:57,912 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T13:27:57,912 INFO [StoreOpener-1588230740-1 {}] 
regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-07T13:27:57,912 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-07T13:27:57,912 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T13:27:57,913 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T13:27:57,913 DEBUG [RS_OPEN_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-07T13:27:57,913 DEBUG [RS_OPEN_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45737/user/jenkins/test-data/6c56ff26-3bd7-d39e-4fc2-e485aa42ab7c/data/hbase/meta/1588230740 2024-12-07T13:27:57,914 DEBUG [RS_OPEN_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45737/user/jenkins/test-data/6c56ff26-3bd7-d39e-4fc2-e485aa42ab7c/data/hbase/meta/1588230740 2024-12-07T13:27:57,915 DEBUG [RS_OPEN_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-07T13:27:57,915 DEBUG [RS_OPEN_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-07T13:27:57,915 DEBUG [RS_OPEN_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-12-07T13:27:57,916 DEBUG [RS_OPEN_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-07T13:27:57,917 INFO [RS_OPEN_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=726837, jitterRate=-0.07578016817569733}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-07T13:27:57,917 DEBUG [RS_OPEN_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-07T13:27:57,917 DEBUG [RS_OPEN_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733578077907Writing region info on filesystem at 1733578077907Initializing all the Stores at 1733578077908 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733578077908Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733578077908Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733578077909 (+1 ms)Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733578077909Cleaning up temporary data from old regions at 1733578077915 (+6 ms)Running coprocessor post-open hooks at 1733578077917 (+2 ms)Region opened successfully at 1733578077917 2024-12-07T13:27:57,918 INFO [RS_OPEN_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733578077881 2024-12-07T13:27:57,920 DEBUG [RS_OPEN_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-07T13:27:57,920 INFO [RS_OPEN_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-07T13:27:57,920 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, 
regionLocation=c7c455b68129,33821,1733578076918 2024-12-07T13:27:57,921 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as c7c455b68129,33821,1733578076918, state=OPEN 2024-12-07T13:27:57,992 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41799-0x1000076380a0000, quorum=127.0.0.1:60064, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-07T13:27:57,992 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33821-0x1000076380a0001, quorum=127.0.0.1:60064, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-07T13:27:57,993 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=c7c455b68129,33821,1733578076918 2024-12-07T13:27:57,993 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-07T13:27:57,993 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-07T13:27:57,998 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-07T13:27:57,998 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=c7c455b68129,33821,1733578076918 in 267 msec 2024-12-07T13:27:58,002 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-07T13:27:58,003 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 697 msec 2024-12-07T13:27:58,004 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-07T13:27:58,004 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-07T13:27:58,006 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-07T13:27:58,006 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=c7c455b68129,33821,1733578076918, seqNum=-1] 2024-12-07T13:27:58,006 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T13:27:58,008 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:47501, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T13:27:58,012 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 745 msec 2024-12-07T13:27:58,013 INFO [master/c7c455b68129:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733578078013, completionTime=-1 2024-12-07T13:27:58,013 INFO 
[master/c7c455b68129:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-12-07T13:27:58,013 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-12-07T13:27:58,014 INFO [master/c7c455b68129:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-12-07T13:27:58,014 INFO [master/c7c455b68129:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733578138014 2024-12-07T13:27:58,014 INFO [master/c7c455b68129:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733578198014 2024-12-07T13:27:58,014 INFO [master/c7c455b68129:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 1 msec 2024-12-07T13:27:58,015 INFO [master/c7c455b68129:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c7c455b68129,41799,1733578076500-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-07T13:27:58,015 INFO [master/c7c455b68129:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c7c455b68129,41799,1733578076500-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-07T13:27:58,015 INFO [master/c7c455b68129:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c7c455b68129,41799,1733578076500-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-07T13:27:58,015 INFO [master/c7c455b68129:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-c7c455b68129:41799, period=300000, unit=MILLISECONDS is enabled. 2024-12-07T13:27:58,015 INFO [master/c7c455b68129:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-07T13:27:58,015 INFO [master/c7c455b68129:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-12-07T13:27:58,016 DEBUG [master/c7c455b68129:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-07T13:27:58,018 INFO [master/c7c455b68129:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.020sec 2024-12-07T13:27:58,018 INFO [master/c7c455b68129:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-07T13:27:58,018 INFO [master/c7c455b68129:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-07T13:27:58,018 INFO [master/c7c455b68129:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-07T13:27:58,018 INFO [master/c7c455b68129:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
2024-12-07T13:27:58,018 INFO [master/c7c455b68129:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-07T13:27:58,018 INFO [master/c7c455b68129:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c7c455b68129,41799,1733578076500-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-07T13:27:58,018 INFO [master/c7c455b68129:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c7c455b68129,41799,1733578076500-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-07T13:27:58,020 DEBUG [master/c7c455b68129:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-07T13:27:58,020 INFO [master/c7c455b68129:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-07T13:27:58,020 INFO [master/c7c455b68129:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=c7c455b68129,41799,1733578076500-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-07T13:27:58,054 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6e75531b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T13:27:58,054 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request c7c455b68129,41799,-1 for getting cluster id 2024-12-07T13:27:58,054 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-07T13:27:58,056 DEBUG [HMaster-EventLoopGroup-16-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '5f2c492f-d1f7-428b-a985-9bac090a5682' 2024-12-07T13:27:58,056 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-07T13:27:58,057 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "5f2c492f-d1f7-428b-a985-9bac090a5682" 2024-12-07T13:27:58,057 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1d60768a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T13:27:58,057 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [c7c455b68129,41799,-1] 2024-12-07T13:27:58,058 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-07T13:27:58,058 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T13:27:58,059 INFO [HMaster-EventLoopGroup-16-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:33412, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-07T13:27:58,061 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2cea7179, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T13:27:58,061 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-07T13:27:58,062 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=c7c455b68129,33821,1733578076918, seqNum=-1] 2024-12-07T13:27:58,063 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T13:27:58,064 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:42586, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T13:27:58,066 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=c7c455b68129,41799,1733578076500 2024-12-07T13:27:58,067 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T13:27:58,069 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-12-07T13:27:58,070 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-07T13:27:58,072 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=test.com%2C8080%2C1, suffix=, logDir=hdfs://localhost:45737/user/jenkins/test-data/6c56ff26-3bd7-d39e-4fc2-e485aa42ab7c/WALs/test.com,8080,1, archiveDir=hdfs://localhost:45737/user/jenkins/test-data/6c56ff26-3bd7-d39e-4fc2-e485aa42ab7c/oldWALs, maxLogs=32 2024-12-07T13:27:58,073 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor test.com%2C8080%2C1.1733578078072 2024-12-07T13:27:58,077 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/6c56ff26-3bd7-d39e-4fc2-e485aa42ab7c/WALs/test.com,8080,1/test.com%2C8080%2C1.1733578078072 2024-12-07T13:27:58,080 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:32791:32791),(127.0.0.1/127.0.0.1:34733:34733)] 2024-12-07T13:27:58,084 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor test.com%2C8080%2C1.1733578078084 2024-12-07T13:27:58,091 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:27:58,091 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:27:58,091 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:27:58,091 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:27:58,091 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:27:58,092 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/6c56ff26-3bd7-d39e-4fc2-e485aa42ab7c/WALs/test.com,8080,1/test.com%2C8080%2C1.1733578078072 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/6c56ff26-3bd7-d39e-4fc2-e485aa42ab7c/WALs/test.com,8080,1/test.com%2C8080%2C1.1733578078084 2024-12-07T13:27:58,093 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45873 is added to blk_1073741835_1011 (size=93) 2024-12-07T13:27:58,093 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39125 is added to blk_1073741835_1011 (size=93) 2024-12-07T13:27:58,096 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:32791:32791),(127.0.0.1/127.0.0.1:34733:34733)] 2024-12-07T13:27:58,096 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:45737/user/jenkins/test-data/6c56ff26-3bd7-d39e-4fc2-e485aa42ab7c/WALs/test.com,8080,1/test.com%2C8080%2C1.1733578078072 to hdfs://localhost:45737/user/jenkins/test-data/6c56ff26-3bd7-d39e-4fc2-e485aa42ab7c/oldWALs/test.com%2C8080%2C1.1733578078072 2024-12-07T13:27:58,096 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:27:58,096 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:27:58,096 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:27:58,096 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:27:58,097 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:27:58,098 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45873 is added to blk_1073741836_1012 (size=93) 2024-12-07T13:27:58,098 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39125 is added to blk_1073741836_1012 (size=93) 2024-12-07T13:27:58,099 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/6c56ff26-3bd7-d39e-4fc2-e485aa42ab7c/oldWALs 2024-12-07T13:27:58,099 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog test.com%2C8080%2C1:(num 1733578078084) 2024-12-07T13:27:58,099 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-12-07T13:27:58,100 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-12-07T13:27:58,100 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-07T13:27:58,100 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T13:27:58,100 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T13:27:58,100 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-07T13:27:58,100 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=674170800, stopped=false 2024-12-07T13:27:58,100 
INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-07T13:27:58,100 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=c7c455b68129,41799,1733578076500 2024-12-07T13:27:58,124 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33821-0x1000076380a0001, quorum=127.0.0.1:60064, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-07T13:27:58,124 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41799-0x1000076380a0000, quorum=127.0.0.1:60064, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-07T13:27:58,124 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33821-0x1000076380a0001, quorum=127.0.0.1:60064, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T13:27:58,124 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41799-0x1000076380a0000, quorum=127.0.0.1:60064, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T13:27:58,124 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-07T13:27:58,124 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-12-07T13:27:58,124 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at 
org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-07T13:27:58,125 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T13:27:58,125 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:41799-0x1000076380a0000, quorum=127.0.0.1:60064, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-07T13:27:58,125 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'c7c455b68129,33821,1733578076918' ***** 2024-12-07T13:27:58,125 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:33821-0x1000076380a0001, quorum=127.0.0.1:60064, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-07T13:27:58,125 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-07T13:27:58,125 INFO [RS:0;c7c455b68129:33821 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-07T13:27:58,125 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-07T13:27:58,125 INFO [RS:0;c7c455b68129:33821 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-07T13:27:58,125 INFO [RS:0;c7c455b68129:33821 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-07T13:27:58,125 INFO [RS:0;c7c455b68129:33821 {}] regionserver.HRegionServer(959): stopping server c7c455b68129,33821,1733578076918 2024-12-07T13:27:58,125 INFO [RS:0;c7c455b68129:33821 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-07T13:27:58,125 INFO [RS:0;c7c455b68129:33821 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;c7c455b68129:33821. 
2024-12-07T13:27:58,125 DEBUG [RS:0;c7c455b68129:33821 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-07T13:27:58,125 DEBUG [RS:0;c7c455b68129:33821 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T13:27:58,125 INFO [RS:0;c7c455b68129:33821 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-07T13:27:58,125 INFO [RS:0;c7c455b68129:33821 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-07T13:27:58,125 INFO [RS:0;c7c455b68129:33821 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-12-07T13:27:58,125 INFO [RS:0;c7c455b68129:33821 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-07T13:27:58,126 INFO [RS:0;c7c455b68129:33821 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-12-07T13:27:58,126 DEBUG [RS:0;c7c455b68129:33821 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-12-07T13:27:58,126 DEBUG [RS:0;c7c455b68129:33821 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-12-07T13:27:58,126 DEBUG [RS_CLOSE_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-07T13:27:58,126 INFO [RS_CLOSE_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-07T13:27:58,126 DEBUG [RS_CLOSE_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-07T13:27:58,126 DEBUG [RS_CLOSE_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-07T13:27:58,126 DEBUG [RS_CLOSE_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-07T13:27:58,126 INFO [RS_CLOSE_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=74 B heapSize=1.22 KB 2024-12-07T13:27:58,139 DEBUG [RS_CLOSE_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45737/user/jenkins/test-data/6c56ff26-3bd7-d39e-4fc2-e485aa42ab7c/data/hbase/meta/1588230740/.tmp/ns/b5ee0f1154404fd9bb4186500712b394 is 43, key is default/ns:d/1733578078008/Put/seqid=0 2024-12-07T13:27:58,143 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45873 is added to blk_1073741837_1013 (size=5153) 2024-12-07T13:27:58,143 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39125 is added to blk_1073741837_1013 (size=5153) 2024-12-07T13:27:58,144 INFO [RS_CLOSE_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:45737/user/jenkins/test-data/6c56ff26-3bd7-d39e-4fc2-e485aa42ab7c/data/hbase/meta/1588230740/.tmp/ns/b5ee0f1154404fd9bb4186500712b394 2024-12-07T13:27:58,148 DEBUG [RS_CLOSE_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45737/user/jenkins/test-data/6c56ff26-3bd7-d39e-4fc2-e485aa42ab7c/data/hbase/meta/1588230740/.tmp/ns/b5ee0f1154404fd9bb4186500712b394 as hdfs://localhost:45737/user/jenkins/test-data/6c56ff26-3bd7-d39e-4fc2-e485aa42ab7c/data/hbase/meta/1588230740/ns/b5ee0f1154404fd9bb4186500712b394 2024-12-07T13:27:58,153 INFO [RS_CLOSE_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45737/user/jenkins/test-data/6c56ff26-3bd7-d39e-4fc2-e485aa42ab7c/data/hbase/meta/1588230740/ns/b5ee0f1154404fd9bb4186500712b394, entries=2, sequenceid=6, filesize=5.0 K 2024-12-07T13:27:58,153 INFO 
[RS_CLOSE_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 27ms, sequenceid=6, compaction requested=false 2024-12-07T13:27:58,154 DEBUG [RS_CLOSE_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_CLOSE_META}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-12-07T13:27:58,158 DEBUG [RS_CLOSE_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45737/user/jenkins/test-data/6c56ff26-3bd7-d39e-4fc2-e485aa42ab7c/data/hbase/meta/1588230740/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-07T13:27:58,158 DEBUG [RS_CLOSE_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-07T13:27:58,158 INFO [RS_CLOSE_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-07T13:27:58,158 DEBUG [RS_CLOSE_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733578078126Running coprocessor pre-close hooks at 1733578078126Disabling compacts and flushes for region at 1733578078126Disabling writes for close at 1733578078126Obtaining lock to block concurrent updates at 1733578078126Preparing flush snapshotting stores in 1588230740 at 1733578078126Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=74, getHeapSize=1184, getOffHeapSize=0, getCellsCount=2 at 1733578078126Flushing stores of hbase:meta,,1.1588230740 at 1733578078127 (+1 ms)Flushing 1588230740/ns: creating writer at 1733578078127Flushing 1588230740/ns: appending metadata at 1733578078139 (+12 ms)Flushing 1588230740/ns: closing flushed file at 1733578078139Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@76039561: reopening flushed file at 1733578078148 (+9 ms)Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 27ms, sequenceid=6, compaction requested=false at 1733578078153 (+5 ms)Writing region close event to WAL at 1733578078154 (+1 ms)Running coprocessor post-close hooks at 1733578078158 (+4 ms)Closed at 1733578078158 2024-12-07T13:27:58,158 DEBUG [RS_CLOSE_META-regionserver/c7c455b68129:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-07T13:27:58,326 INFO [RS:0;c7c455b68129:33821 {}] regionserver.HRegionServer(976): stopping server c7c455b68129,33821,1733578076918; all regions closed. 
2024-12-07T13:27:58,327 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:27:58,327 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:27:58,327 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:27:58,327 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:27:58,327 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:27:58,331 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45873 is added to blk_1073741834_1010 (size=1152) 2024-12-07T13:27:58,332 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39125 is added to blk_1073741834_1010 (size=1152) 2024-12-07T13:27:58,336 DEBUG [RS:0;c7c455b68129:33821 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/6c56ff26-3bd7-d39e-4fc2-e485aa42ab7c/oldWALs 2024-12-07T13:27:58,336 INFO [RS:0;c7c455b68129:33821 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog c7c455b68129%2C33821%2C1733578076918.meta:.meta(num 1733578077894) 2024-12-07T13:27:58,337 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:27:58,337 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:27:58,337 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:27:58,337 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:27:58,338 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:27:58,339 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39125 is added to blk_1073741833_1009 (size=93) 2024-12-07T13:27:58,339 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45873 is added to blk_1073741833_1009 (size=93) 2024-12-07T13:27:58,341 DEBUG [RS:0;c7c455b68129:33821 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/6c56ff26-3bd7-d39e-4fc2-e485aa42ab7c/oldWALs 2024-12-07T13:27:58,341 INFO [RS:0;c7c455b68129:33821 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog c7c455b68129%2C33821%2C1733578076918:(num 1733578077629) 2024-12-07T13:27:58,341 DEBUG [RS:0;c7c455b68129:33821 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T13:27:58,341 INFO [RS:0;c7c455b68129:33821 {}] regionserver.LeaseManager(133): Closed leases 2024-12-07T13:27:58,341 INFO [RS:0;c7c455b68129:33821 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-07T13:27:58,341 INFO [RS:0;c7c455b68129:33821 {}] hbase.ChoreService(370): Chore service for: regionserver/c7c455b68129:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-07T13:27:58,341 INFO [RS:0;c7c455b68129:33821 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-07T13:27:58,341 INFO [regionserver/c7c455b68129:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-07T13:27:58,341 INFO [RS:0;c7c455b68129:33821 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:33821 2024-12-07T13:27:58,354 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33821-0x1000076380a0001, quorum=127.0.0.1:60064, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/c7c455b68129,33821,1733578076918 2024-12-07T13:27:58,354 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41799-0x1000076380a0000, quorum=127.0.0.1:60064, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-07T13:27:58,354 INFO [RS:0;c7c455b68129:33821 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-07T13:27:58,366 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [c7c455b68129,33821,1733578076918] 2024-12-07T13:27:58,377 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/c7c455b68129,33821,1733578076918 already deleted, retry=false 2024-12-07T13:27:58,377 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; c7c455b68129,33821,1733578076918 expired; onlineServers=0 2024-12-07T13:27:58,377 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'c7c455b68129,41799,1733578076500' ***** 2024-12-07T13:27:58,377 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-07T13:27:58,377 INFO [M:0;c7c455b68129:41799 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-07T13:27:58,377 INFO [M:0;c7c455b68129:41799 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-07T13:27:58,377 DEBUG [M:0;c7c455b68129:41799 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-07T13:27:58,377 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-12-07T13:27:58,377 DEBUG [M:0;c7c455b68129:41799 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-07T13:27:58,377 DEBUG [master/c7c455b68129:0:becomeActiveMaster-HFileCleaner.large.0-1733578077274 {}] cleaner.HFileCleaner(306): Exit Thread[master/c7c455b68129:0:becomeActiveMaster-HFileCleaner.large.0-1733578077274,5,FailOnTimeoutGroup] 2024-12-07T13:27:58,377 DEBUG [master/c7c455b68129:0:becomeActiveMaster-HFileCleaner.small.0-1733578077274 {}] cleaner.HFileCleaner(306): Exit Thread[master/c7c455b68129:0:becomeActiveMaster-HFileCleaner.small.0-1733578077274,5,FailOnTimeoutGroup] 2024-12-07T13:27:58,377 INFO [M:0;c7c455b68129:41799 {}] hbase.ChoreService(370): Chore service for: master/c7c455b68129:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-12-07T13:27:58,378 INFO [M:0;c7c455b68129:41799 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-07T13:27:58,378 DEBUG [M:0;c7c455b68129:41799 {}] master.HMaster(1795): Stopping service threads 2024-12-07T13:27:58,378 INFO [M:0;c7c455b68129:41799 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-07T13:27:58,378 INFO [M:0;c7c455b68129:41799 {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-07T13:27:58,378 INFO [M:0;c7c455b68129:41799 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-07T13:27:58,378 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-07T13:27:58,387 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41799-0x1000076380a0000, quorum=127.0.0.1:60064, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-07T13:27:58,387 DEBUG [M:0;c7c455b68129:41799 {}] zookeeper.ZKUtil(347): master:41799-0x1000076380a0000, quorum=127.0.0.1:60064, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-07T13:27:58,387 WARN [M:0;c7c455b68129:41799 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-07T13:27:58,388 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41799-0x1000076380a0000, quorum=127.0.0.1:60064, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T13:27:58,388 INFO [M:0;c7c455b68129:41799 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:45737/user/jenkins/test-data/6c56ff26-3bd7-d39e-4fc2-e485aa42ab7c/.lastflushedseqids 2024-12-07T13:27:58,396 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39125 is added to blk_1073741838_1014 (size=99) 2024-12-07T13:27:58,396 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45873 is added to blk_1073741838_1014 (size=99) 2024-12-07T13:27:58,397 INFO [M:0;c7c455b68129:41799 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-12-07T13:27:58,397 INFO [M:0;c7c455b68129:41799 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-07T13:27:58,397 DEBUG [M:0;c7c455b68129:41799 {}] regionserver.HRegion(1722): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-07T13:27:58,397 INFO [M:0;c7c455b68129:41799 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-07T13:27:58,397 DEBUG [M:0;c7c455b68129:41799 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-07T13:27:58,397 DEBUG [M:0;c7c455b68129:41799 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-07T13:27:58,397 DEBUG [M:0;c7c455b68129:41799 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-07T13:27:58,398 INFO [M:0;c7c455b68129:41799 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=7.67 KB heapSize=11.34 KB 2024-12-07T13:27:58,414 DEBUG [M:0;c7c455b68129:41799 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45737/user/jenkins/test-data/6c56ff26-3bd7-d39e-4fc2-e485aa42ab7c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/f37382246b564657ac146fa319aca22b is 82, key is hbase:meta,,1/info:regioninfo/1733578077920/Put/seqid=0 2024-12-07T13:27:58,417 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39125 is added to blk_1073741839_1015 (size=5672) 2024-12-07T13:27:58,418 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45873 is added to blk_1073741839_1015 (size=5672) 2024-12-07T13:27:58,418 INFO [M:0;c7c455b68129:41799 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:45737/user/jenkins/test-data/6c56ff26-3bd7-d39e-4fc2-e485aa42ab7c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/f37382246b564657ac146fa319aca22b 2024-12-07T13:27:58,427 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-07T13:27:58,427 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-12-07T13:27:58,427 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling 2024-12-07T13:27:58,434 DEBUG [M:0;c7c455b68129:41799 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45737/user/jenkins/test-data/6c56ff26-3bd7-d39e-4fc2-e485aa42ab7c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/00b7db7b878b4f6296ae41236da27475 is 240, key is \x00\x00\x00\x00\x00\x00\x00\x01/proc:d/1733578078012/Put/seqid=0 2024-12-07T13:27:58,438 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39125 is added to blk_1073741840_1016 (size=5275) 2024-12-07T13:27:58,438 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45873 is added to blk_1073741840_1016 (size=5275) 2024-12-07T13:27:58,438 INFO [M:0;c7c455b68129:41799 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.06 KB at sequenceid=29 (bloomFilter=true), 
to=hdfs://localhost:45737/user/jenkins/test-data/6c56ff26-3bd7-d39e-4fc2-e485aa42ab7c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/00b7db7b878b4f6296ae41236da27475 2024-12-07T13:27:58,453 DEBUG [M:0;c7c455b68129:41799 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45737/user/jenkins/test-data/6c56ff26-3bd7-d39e-4fc2-e485aa42ab7c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/8a21ae9670aa4c069931dcab8a9c6d80 is 69, key is c7c455b68129,33821,1733578076918/rs:state/1733578077482/Put/seqid=0 2024-12-07T13:27:58,457 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45873 is added to blk_1073741841_1017 (size=5156) 2024-12-07T13:27:58,457 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39125 is added to blk_1073741841_1017 (size=5156) 2024-12-07T13:27:58,457 INFO [M:0;c7c455b68129:41799 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:45737/user/jenkins/test-data/6c56ff26-3bd7-d39e-4fc2-e485aa42ab7c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/8a21ae9670aa4c069931dcab8a9c6d80 2024-12-07T13:27:58,466 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33821-0x1000076380a0001, quorum=127.0.0.1:60064, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-07T13:27:58,466 INFO [RS:0;c7c455b68129:33821 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-07T13:27:58,466 INFO [RS:0;c7c455b68129:33821 {}] regionserver.HRegionServer(1031): Exiting; stopping=c7c455b68129,33821,1733578076918; zookeeper connection closed. 
2024-12-07T13:27:58,466 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33821-0x1000076380a0001, quorum=127.0.0.1:60064, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-07T13:27:58,467 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@74fe1b01 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@74fe1b01 2024-12-07T13:27:58,467 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-12-07T13:27:58,472 DEBUG [M:0;c7c455b68129:41799 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45737/user/jenkins/test-data/6c56ff26-3bd7-d39e-4fc2-e485aa42ab7c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/408d9b783bbd48839812dc4bf88c8a93 is 52, key is load_balancer_on/state:d/1733578078068/Put/seqid=0 2024-12-07T13:27:58,476 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45873 is added to blk_1073741842_1018 (size=5056) 2024-12-07T13:27:58,476 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39125 is added to blk_1073741842_1018 (size=5056) 2024-12-07T13:27:58,476 INFO [M:0;c7c455b68129:41799 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:45737/user/jenkins/test-data/6c56ff26-3bd7-d39e-4fc2-e485aa42ab7c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/408d9b783bbd48839812dc4bf88c8a93 2024-12-07T13:27:58,480 DEBUG [M:0;c7c455b68129:41799 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45737/user/jenkins/test-data/6c56ff26-3bd7-d39e-4fc2-e485aa42ab7c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/f37382246b564657ac146fa319aca22b as hdfs://localhost:45737/user/jenkins/test-data/6c56ff26-3bd7-d39e-4fc2-e485aa42ab7c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/f37382246b564657ac146fa319aca22b 2024-12-07T13:27:58,484 INFO [M:0;c7c455b68129:41799 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45737/user/jenkins/test-data/6c56ff26-3bd7-d39e-4fc2-e485aa42ab7c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/f37382246b564657ac146fa319aca22b, entries=8, sequenceid=29, filesize=5.5 K 2024-12-07T13:27:58,485 DEBUG [M:0;c7c455b68129:41799 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45737/user/jenkins/test-data/6c56ff26-3bd7-d39e-4fc2-e485aa42ab7c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/00b7db7b878b4f6296ae41236da27475 as hdfs://localhost:45737/user/jenkins/test-data/6c56ff26-3bd7-d39e-4fc2-e485aa42ab7c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/00b7db7b878b4f6296ae41236da27475 2024-12-07T13:27:58,488 INFO [M:0;c7c455b68129:41799 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45737/user/jenkins/test-data/6c56ff26-3bd7-d39e-4fc2-e485aa42ab7c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/00b7db7b878b4f6296ae41236da27475, entries=3, sequenceid=29, filesize=5.2 K 2024-12-07T13:27:58,489 DEBUG [M:0;c7c455b68129:41799 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:45737/user/jenkins/test-data/6c56ff26-3bd7-d39e-4fc2-e485aa42ab7c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/8a21ae9670aa4c069931dcab8a9c6d80 as hdfs://localhost:45737/user/jenkins/test-data/6c56ff26-3bd7-d39e-4fc2-e485aa42ab7c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/8a21ae9670aa4c069931dcab8a9c6d80 2024-12-07T13:27:58,493 INFO [M:0;c7c455b68129:41799 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45737/user/jenkins/test-data/6c56ff26-3bd7-d39e-4fc2-e485aa42ab7c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/8a21ae9670aa4c069931dcab8a9c6d80, entries=1, sequenceid=29, filesize=5.0 K 2024-12-07T13:27:58,493 DEBUG [M:0;c7c455b68129:41799 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45737/user/jenkins/test-data/6c56ff26-3bd7-d39e-4fc2-e485aa42ab7c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/408d9b783bbd48839812dc4bf88c8a93 as hdfs://localhost:45737/user/jenkins/test-data/6c56ff26-3bd7-d39e-4fc2-e485aa42ab7c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/408d9b783bbd48839812dc4bf88c8a93 2024-12-07T13:27:58,497 INFO [M:0;c7c455b68129:41799 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45737/user/jenkins/test-data/6c56ff26-3bd7-d39e-4fc2-e485aa42ab7c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/408d9b783bbd48839812dc4bf88c8a93, entries=1, sequenceid=29, filesize=4.9 K 2024-12-07T13:27:58,498 INFO [M:0;c7c455b68129:41799 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 101ms, sequenceid=29, compaction requested=false 2024-12-07T13:27:58,499 INFO [M:0;c7c455b68129:41799 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-07T13:27:58,499 DEBUG [M:0;c7c455b68129:41799 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733578078397Disabling compacts and flushes for region at 1733578078397Disabling writes for close at 1733578078397Obtaining lock to block concurrent updates at 1733578078398 (+1 ms)Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733578078398Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=7850, getHeapSize=11544, getOffHeapSize=0, getCellsCount=36 at 1733578078398Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1733578078399 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733578078399Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733578078413 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733578078413Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733578078421 (+8 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733578078434 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733578078434Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733578078442 (+8 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733578078453 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733578078453Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1733578078460 (+7 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1733578078472 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1733578078472Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5dd944c2: reopening flushed file at 1733578078479 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@577c079c: reopening flushed file at 1733578078484 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3086198b: reopening flushed file at 1733578078488 (+4 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@224875fa: reopening flushed file at 1733578078493 (+5 ms)Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 101ms, sequenceid=29, compaction requested=false at 1733578078498 (+5 ms)Writing region close event to WAL at 1733578078499 (+1 ms)Closed at 1733578078499 2024-12-07T13:27:58,499 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:27:58,500 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:27:58,500 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:27:58,500 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:27:58,500 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-07T13:27:58,501 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39125 is added to blk_1073741830_1006 (size=10311) 2024-12-07T13:27:58,501 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45873 is added to blk_1073741830_1006 (size=10311) 2024-12-07T13:27:58,502 INFO [M:0;c7c455b68129:41799 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-12-07T13:27:58,502 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-07T13:27:58,502 INFO [M:0;c7c455b68129:41799 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:41799 2024-12-07T13:27:58,502 INFO [M:0;c7c455b68129:41799 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-07T13:27:58,614 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41799-0x1000076380a0000, quorum=127.0.0.1:60064, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-07T13:27:58,614 INFO [M:0;c7c455b68129:41799 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-07T13:27:58,614 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41799-0x1000076380a0000, quorum=127.0.0.1:60064, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-07T13:27:58,619 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@403020f8{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-07T13:27:58,620 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6f4abee{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-07T13:27:58,620 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-07T13:27:58,621 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2753102b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-07T13:27:58,621 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2551ca75{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0fa6162d-4bf2-57be-4763-22e9af1a8e11/hadoop.log.dir/,STOPPED} 2024-12-07T13:27:58,623 WARN [BP-816922664-172.17.0.3-1733578074321 heartbeating to localhost/127.0.0.1:45737 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-07T13:27:58,623 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-07T13:27:58,623 WARN [BP-816922664-172.17.0.3-1733578074321 heartbeating to localhost/127.0.0.1:45737 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-816922664-172.17.0.3-1733578074321 (Datanode Uuid 26fc21ae-d79a-4e66-9a67-5813c2e293f1) service to localhost/127.0.0.1:45737 2024-12-07T13:27:58,623 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-07T13:27:58,624 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0fa6162d-4bf2-57be-4763-22e9af1a8e11/cluster_82231e54-2080-fe71-d282-39ea83365658/data/data3/current/BP-816922664-172.17.0.3-1733578074321 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-07T13:27:58,625 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0fa6162d-4bf2-57be-4763-22e9af1a8e11/cluster_82231e54-2080-fe71-d282-39ea83365658/data/data4/current/BP-816922664-172.17.0.3-1733578074321 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-07T13:27:58,625 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-07T13:27:58,628 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2ec1c28e{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-07T13:27:58,628 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2601a9a1{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-07T13:27:58,628 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-07T13:27:58,628 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@56063e0c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-07T13:27:58,628 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1cfa6b2d{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0fa6162d-4bf2-57be-4763-22e9af1a8e11/hadoop.log.dir/,STOPPED} 2024-12-07T13:27:58,629 WARN [BP-816922664-172.17.0.3-1733578074321 heartbeating to localhost/127.0.0.1:45737 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-07T13:27:58,629 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-07T13:27:58,629 WARN [BP-816922664-172.17.0.3-1733578074321 heartbeating to localhost/127.0.0.1:45737 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-816922664-172.17.0.3-1733578074321 (Datanode Uuid 14f8f4b2-dd45-4ed4-ac1f-b5cee7cc69fe) service to localhost/127.0.0.1:45737
2024-12-07T13:27:58,629 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-07T13:27:58,630 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0fa6162d-4bf2-57be-4763-22e9af1a8e11/cluster_82231e54-2080-fe71-d282-39ea83365658/data/data1/current/BP-816922664-172.17.0.3-1733578074321 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-07T13:27:58,630 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0fa6162d-4bf2-57be-4763-22e9af1a8e11/cluster_82231e54-2080-fe71-d282-39ea83365658/data/data2/current/BP-816922664-172.17.0.3-1733578074321 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-07T13:27:58,630 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-07T13:27:58,635 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@265f6a26{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-12-07T13:27:58,635 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@64e1b9c7{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-07T13:27:58,635 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-07T13:27:58,635 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@40f3733a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-07T13:27:58,635 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@18478920{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0fa6162d-4bf2-57be-4763-22e9af1a8e11/hadoop.log.dir/,STOPPED}
2024-12-07T13:27:58,641 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers
2024-12-07T13:27:58,644 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,43699,1733577893481/c7c455b68129%2C43699%2C1733577893481.meta.1733577894498.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-07T13:27:58,644 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:35785/user/jenkins/test-data/966e0c19-31eb-5544-9319-2a06460f4084/WALs/c7c455b68129,42863,1733577894737/c7c455b68129%2C42863%2C1733577894737.1733577894972
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-07T13:27:58,656 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down
2024-12-07T13:27:58,663 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnNothingWritten Thread=268 (was 229)
Potentially hanging thread: nioEventLoopGroup-42-3
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-43-3
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-43-1
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:45737
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
    app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-45-1
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-1
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-43-2
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: HMaster-EventLoopGroup-16-1
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:45737 from jenkins
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042)
    app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)
Potentially hanging thread: nioEventLoopGroup-42-1
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-45-2
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-44-1
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: HMaster-EventLoopGroup-16-3
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-45-3
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-44-2
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-42-2
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-2
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:45737
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
    app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:45737 from jenkins.hfs.7
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042)
    app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)
Potentially hanging thread: LeaseRenewer:jenkins.hfs.7@localhost:45737
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: HMaster-EventLoopGroup-16-2
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-3
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:45737 from jenkins
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042)
    app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)
Potentially hanging thread: nioEventLoopGroup-44-3
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:45737
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
    app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: LeaseRenewer:jenkins@localhost:45737
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
- Thread LEAK? -, OpenFileDescriptor=532 (was 512) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=157 (was 144) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=15641 (was 15668)