2024-11-13 00:27:34,960 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca 2024-11-13 00:27:34,976 main DEBUG Took 0.013473 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging 2024-11-13 00:27:34,976 main DEBUG PluginManager 'Core' found 129 plugins 2024-11-13 00:27:34,977 main DEBUG PluginManager 'Level' found 0 plugins 2024-11-13 00:27:34,978 main DEBUG PluginManager 'Lookup' found 16 plugins 2024-11-13 00:27:34,979 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-13 00:27:34,988 main DEBUG PluginManager 'TypeConverter' found 26 plugins 2024-11-13 00:27:35,004 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-13 00:27:35,006 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-13 00:27:35,006 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-13 00:27:35,007 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-13 00:27:35,007 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-13 00:27:35,008 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-13 00:27:35,009 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-13 00:27:35,009 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-13 00:27:35,010 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-13 00:27:35,010 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-13 00:27:35,011 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-13 00:27:35,011 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-13 00:27:35,012 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-13 00:27:35,012 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 
2024-11-13 00:27:35,013 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-13 00:27:35,013 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-13 00:27:35,014 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-13 00:27:35,014 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-13 00:27:35,015 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-13 00:27:35,015 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-13 00:27:35,016 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-13 00:27:35,016 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-13 00:27:35,017 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-13 00:27:35,017 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-13 00:27:35,018 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-13 00:27:35,018 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger]. 2024-11-13 00:27:35,020 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-13 00:27:35,022 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin]. 2024-11-13 00:27:35,024 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root}) 2024-11-13 00:27:35,025 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout]. 
2024-11-13 00:27:35,026 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null") 2024-11-13 00:27:35,027 main DEBUG PluginManager 'Converter' found 47 plugins 2024-11-13 00:27:35,037 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender]. 2024-11-13 00:27:35,040 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={}) 2024-11-13 00:27:35,041 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR 2024-11-13 00:27:35,042 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin]. 2024-11-13 00:27:35,042 main DEBUG createAppenders(={Console}) 2024-11-13 00:27:35,043 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca initialized 2024-11-13 00:27:35,044 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca 2024-11-13 00:27:35,044 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca OK. 2024-11-13 00:27:35,045 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1 2024-11-13 00:27:35,045 main DEBUG OutputStream closed 2024-11-13 00:27:35,045 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true 2024-11-13 00:27:35,046 main DEBUG Appender DefaultConsole-1 stopped with status true 2024-11-13 00:27:35,046 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@6404f418 OK 2024-11-13 00:27:35,120 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6 2024-11-13 00:27:35,122 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger 2024-11-13 00:27:35,123 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector 2024-11-13 00:27:35,124 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name= 2024-11-13 00:27:35,124 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory 2024-11-13 00:27:35,125 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter 2024-11-13 00:27:35,125 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper 2024-11-13 00:27:35,126 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j 2024-11-13 00:27:35,126 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl 2024-11-13 00:27:35,127 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans 2024-11-13 00:27:35,127 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase 2024-11-13 00:27:35,128 main DEBUG Registering 
MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop 2024-11-13 00:27:35,128 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers 2024-11-13 00:27:35,129 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices 2024-11-13 00:27:35,129 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig 2024-11-13 00:27:35,129 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel 2024-11-13 00:27:35,129 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore 2024-11-13 00:27:35,130 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console 2024-11-13 00:27:35,132 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-11-13 00:27:35,133 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-logging/target/hbase-logging-3.0.0-beta-2-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@6dab9b6d) with optional ClassLoader: null 2024-11-13 00:27:35,133 main DEBUG Shutdown hook enabled. Registering a new one. 2024-11-13 00:27:35,134 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@6dab9b6d] started OK. 2024-11-13T00:27:35,386 DEBUG [main {}] hbase.HBaseTestingUtil(323): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/efb032a9-b881-862e-342e-4f19ce7d56d5 2024-11-13 00:27:35,390 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED) 2024-11-13 00:27:35,391 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 
2024-11-13T00:27:35,402 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.regionserver.wal.TestLogRolling timeout: 13 mins 2024-11-13T00:27:35,434 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testSlowSyncLogRolling Thread=12, OpenFileDescriptor=287, MaxFileDescriptor=1048576, SystemLoadAverage=141, ProcessCount=11, AvailableMemoryMB=1771 2024-11-13T00:27:35,437 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-13T00:27:35,453 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/efb032a9-b881-862e-342e-4f19ce7d56d5/cluster_282f6315-ffb3-e324-cb38-81a66cca1879, deleteOnExit=true 2024-11-13T00:27:35,454 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-13T00:27:35,455 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/efb032a9-b881-862e-342e-4f19ce7d56d5/test.cache.data in system properties and HBase conf 2024-11-13T00:27:35,455 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/efb032a9-b881-862e-342e-4f19ce7d56d5/hadoop.tmp.dir in system properties and HBase conf 2024-11-13T00:27:35,456 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/efb032a9-b881-862e-342e-4f19ce7d56d5/hadoop.log.dir in system properties and HBase conf 2024-11-13T00:27:35,457 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/efb032a9-b881-862e-342e-4f19ce7d56d5/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-13T00:27:35,457 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/efb032a9-b881-862e-342e-4f19ce7d56d5/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-13T00:27:35,457 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-13T00:27:35,556 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable 2024-11-13T00:27:35,673 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-13T00:27:35,676 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/efb032a9-b881-862e-342e-4f19ce7d56d5/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-13T00:27:35,677 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/efb032a9-b881-862e-342e-4f19ce7d56d5/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-13T00:27:35,678 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/efb032a9-b881-862e-342e-4f19ce7d56d5/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-13T00:27:35,679 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/efb032a9-b881-862e-342e-4f19ce7d56d5/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-13T00:27:35,679 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/efb032a9-b881-862e-342e-4f19ce7d56d5/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-13T00:27:35,680 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/efb032a9-b881-862e-342e-4f19ce7d56d5/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-13T00:27:35,680 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/efb032a9-b881-862e-342e-4f19ce7d56d5/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-13T00:27:35,681 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/efb032a9-b881-862e-342e-4f19ce7d56d5/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-13T00:27:35,681 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/efb032a9-b881-862e-342e-4f19ce7d56d5/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-13T00:27:35,682 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/efb032a9-b881-862e-342e-4f19ce7d56d5/nfs.dump.dir in system properties and HBase conf 2024-11-13T00:27:35,682 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/efb032a9-b881-862e-342e-4f19ce7d56d5/java.io.tmpdir in system properties and HBase conf 2024-11-13T00:27:35,682 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/efb032a9-b881-862e-342e-4f19ce7d56d5/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-13T00:27:35,683 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/efb032a9-b881-862e-342e-4f19ce7d56d5/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-13T00:27:35,683 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/efb032a9-b881-862e-342e-4f19ce7d56d5/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-13T00:27:36,181 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-13T00:27:36,548 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties 2024-11-13T00:27:36,621 INFO [Time-limited test {}] log.Log(170): Logging initialized @2468ms to org.eclipse.jetty.util.log.Slf4jLog 2024-11-13T00:27:36,693 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-13T00:27:36,754 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-13T00:27:36,774 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-13T00:27:36,774 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-13T00:27:36,775 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-13T00:27:36,788 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-13T00:27:36,790 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7c5202f3{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/efb032a9-b881-862e-342e-4f19ce7d56d5/hadoop.log.dir/,AVAILABLE} 2024-11-13T00:27:36,791 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5ae7f863{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-13T00:27:36,988 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7c1a236c{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/efb032a9-b881-862e-342e-4f19ce7d56d5/java.io.tmpdir/jetty-localhost-45575-hadoop-hdfs-3_4_1-tests_jar-_-any-11086508312123725749/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-13T00:27:36,996 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5a625720{HTTP/1.1, (http/1.1)}{localhost:45575} 2024-11-13T00:27:36,996 INFO [Time-limited test {}] server.Server(415): Started @2844ms 2024-11-13T00:27:37,033 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-13T00:27:37,384 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-13T00:27:37,390 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-13T00:27:37,391 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-13T00:27:37,392 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-13T00:27:37,392 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-13T00:27:37,393 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@460757e7{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/efb032a9-b881-862e-342e-4f19ce7d56d5/hadoop.log.dir/,AVAILABLE} 2024-11-13T00:27:37,393 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6e911877{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-13T00:27:37,516 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3b32dfc5{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/efb032a9-b881-862e-342e-4f19ce7d56d5/java.io.tmpdir/jetty-localhost-44543-hadoop-hdfs-3_4_1-tests_jar-_-any-16480691777646587442/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-13T00:27:37,516 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@43ebd249{HTTP/1.1, (http/1.1)}{localhost:44543} 2024-11-13T00:27:37,517 INFO [Time-limited test {}] server.Server(415): Started @3365ms 2024-11-13T00:27:37,577 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-13T00:27:37,699 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-13T00:27:37,704 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-13T00:27:37,705 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-13T00:27:37,705 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-13T00:27:37,705 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-13T00:27:37,706 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@154eec55{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/efb032a9-b881-862e-342e-4f19ce7d56d5/hadoop.log.dir/,AVAILABLE} 2024-11-13T00:27:37,707 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@29058af{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-13T00:27:37,835 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@78ec6c63{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/efb032a9-b881-862e-342e-4f19ce7d56d5/java.io.tmpdir/jetty-localhost-41089-hadoop-hdfs-3_4_1-tests_jar-_-any-17534926242029448948/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-13T00:27:37,835 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@633713c3{HTTP/1.1, (http/1.1)}{localhost:41089} 2024-11-13T00:27:37,836 INFO [Time-limited test {}] server.Server(415): Started @3684ms 2024-11-13T00:27:37,839 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-11-13T00:27:38,021 WARN [Thread-97 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/efb032a9-b881-862e-342e-4f19ce7d56d5/cluster_282f6315-ffb3-e324-cb38-81a66cca1879/data/data2/current/BP-1622935336-172.17.0.2-1731457656287/current, will proceed with Du for space computation calculation, 2024-11-13T00:27:38,021 WARN [Thread-95 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/efb032a9-b881-862e-342e-4f19ce7d56d5/cluster_282f6315-ffb3-e324-cb38-81a66cca1879/data/data1/current/BP-1622935336-172.17.0.2-1731457656287/current, will proceed with Du for space computation calculation, 2024-11-13T00:27:38,021 WARN [Thread-96 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/efb032a9-b881-862e-342e-4f19ce7d56d5/cluster_282f6315-ffb3-e324-cb38-81a66cca1879/data/data3/current/BP-1622935336-172.17.0.2-1731457656287/current, will proceed with Du for space computation calculation, 2024-11-13T00:27:38,021 WARN [Thread-98 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/efb032a9-b881-862e-342e-4f19ce7d56d5/cluster_282f6315-ffb3-e324-cb38-81a66cca1879/data/data4/current/BP-1622935336-172.17.0.2-1731457656287/current, will proceed with Du for space computation calculation, 2024-11-13T00:27:38,074 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-13T00:27:38,074 WARN [Thread-81 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-13T00:27:38,142 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xa5a6eab81b97535e with lease ID 0x24e6a00695a618fa: Processing first storage report for DS-0fba3322-3b05-4261-a75d-fbd4306bb1bf from datanode DatanodeRegistration(127.0.0.1:34907, datanodeUuid=4ad17a83-175b-4dbe-82fa-6ce1a0b71e83, infoPort=39731, infoSecurePort=0, ipcPort=33633, storageInfo=lv=-57;cid=testClusterID;nsid=1467632119;c=1731457656287) 2024-11-13T00:27:38,144 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xa5a6eab81b97535e with lease ID 0x24e6a00695a618fa: from storage DS-0fba3322-3b05-4261-a75d-fbd4306bb1bf node DatanodeRegistration(127.0.0.1:34907, datanodeUuid=4ad17a83-175b-4dbe-82fa-6ce1a0b71e83, infoPort=39731, infoSecurePort=0, ipcPort=33633, storageInfo=lv=-57;cid=testClusterID;nsid=1467632119;c=1731457656287), blocks: 0, hasStaleStorage: true, processing time: 2 msecs, invalidatedBlocks: 0 2024-11-13T00:27:38,144 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x933b2cc28602ed28 with lease ID 0x24e6a00695a618fb: Processing first storage report for DS-eb6af8ad-fdb4-43dd-9bce-4e752d43af71 from datanode DatanodeRegistration(127.0.0.1:36357, datanodeUuid=1f745f00-d095-4aeb-861e-a30a9e71ed37, infoPort=46287, infoSecurePort=0, ipcPort=35163, storageInfo=lv=-57;cid=testClusterID;nsid=1467632119;c=1731457656287) 2024-11-13T00:27:38,145 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x933b2cc28602ed28 with lease ID 0x24e6a00695a618fb: from storage DS-eb6af8ad-fdb4-43dd-9bce-4e752d43af71 node DatanodeRegistration(127.0.0.1:36357, datanodeUuid=1f745f00-d095-4aeb-861e-a30a9e71ed37, infoPort=46287, infoSecurePort=0, ipcPort=35163, storageInfo=lv=-57;cid=testClusterID;nsid=1467632119;c=1731457656287), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-13T00:27:38,145 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x933b2cc28602ed28 with lease ID 0x24e6a00695a618fb: Processing first storage report for DS-21657e52-d095-4c6a-a393-0ff190532da5 from datanode DatanodeRegistration(127.0.0.1:36357, datanodeUuid=1f745f00-d095-4aeb-861e-a30a9e71ed37, infoPort=46287, infoSecurePort=0, ipcPort=35163, storageInfo=lv=-57;cid=testClusterID;nsid=1467632119;c=1731457656287) 2024-11-13T00:27:38,145 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x933b2cc28602ed28 with lease ID 0x24e6a00695a618fb: from storage DS-21657e52-d095-4c6a-a393-0ff190532da5 node DatanodeRegistration(127.0.0.1:36357, datanodeUuid=1f745f00-d095-4aeb-861e-a30a9e71ed37, infoPort=46287, infoSecurePort=0, ipcPort=35163, storageInfo=lv=-57;cid=testClusterID;nsid=1467632119;c=1731457656287), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-13T00:27:38,146 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xa5a6eab81b97535e with lease ID 0x24e6a00695a618fa: Processing first storage report for DS-09ef4b75-e758-4b4d-b8b8-31938d1c9948 from datanode DatanodeRegistration(127.0.0.1:34907, datanodeUuid=4ad17a83-175b-4dbe-82fa-6ce1a0b71e83, infoPort=39731, infoSecurePort=0, ipcPort=33633, storageInfo=lv=-57;cid=testClusterID;nsid=1467632119;c=1731457656287) 2024-11-13T00:27:38,146 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 
0xa5a6eab81b97535e with lease ID 0x24e6a00695a618fa: from storage DS-09ef4b75-e758-4b4d-b8b8-31938d1c9948 node DatanodeRegistration(127.0.0.1:34907, datanodeUuid=4ad17a83-175b-4dbe-82fa-6ce1a0b71e83, infoPort=39731, infoSecurePort=0, ipcPort=33633, storageInfo=lv=-57;cid=testClusterID;nsid=1467632119;c=1731457656287), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-13T00:27:38,229 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/efb032a9-b881-862e-342e-4f19ce7d56d5 2024-11-13T00:27:38,306 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/efb032a9-b881-862e-342e-4f19ce7d56d5/cluster_282f6315-ffb3-e324-cb38-81a66cca1879/zookeeper_0, clientPort=64321, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/efb032a9-b881-862e-342e-4f19ce7d56d5/cluster_282f6315-ffb3-e324-cb38-81a66cca1879/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/efb032a9-b881-862e-342e-4f19ce7d56d5/cluster_282f6315-ffb3-e324-cb38-81a66cca1879/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-13T00:27:38,315 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=64321 2024-11-13T00:27:38,326 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T00:27:38,329 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T00:27:38,553 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34907 is added to blk_1073741825_1001 (size=7) 2024-11-13T00:27:38,554 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36357 is added to blk_1073741825_1001 (size=7) 2024-11-13T00:27:38,961 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:36657/user/jenkins/test-data/90fad67b-44a6-07b6-9c5c-bf568afb6a9a with version=8 2024-11-13T00:27:38,961 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1139): Setting hbase.fs.tmp.dir to hdfs://localhost:36657/user/jenkins/test-data/90fad67b-44a6-07b6-9c5c-bf568afb6a9a/hbase-staging 2024-11-13T00:27:39,052 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16 2024-11-13T00:27:39,301 INFO [Time-limited test {}] client.ConnectionUtils(128): master/cb28580903fb:0 server-side Connection retries=45 2024-11-13T00:27:39,312 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-13T00:27:39,313 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class 
java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-13T00:27:39,317 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-13T00:27:39,317 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-13T00:27:39,317 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-13T00:27:39,451 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-13T00:27:39,513 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl 2024-11-13T00:27:39,522 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout 2024-11-13T00:27:39,526 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-13T00:27:39,554 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 17403 (auto-detected) 2024-11-13T00:27:39,555 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:02 (auto-detected) 2024-11-13T00:27:39,574 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:35075 2024-11-13T00:27:39,596 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:35075 connecting to ZooKeeper ensemble=127.0.0.1:64321 2024-11-13T00:27:39,631 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:350750x0, quorum=127.0.0.1:64321, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-13T00:27:39,634 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:35075-0x1010eb918400000 connected 2024-11-13T00:27:39,666 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T00:27:39,669 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T00:27:39,680 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:35075-0x1010eb918400000, quorum=127.0.0.1:64321, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-13T00:27:39,685 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:36657/user/jenkins/test-data/90fad67b-44a6-07b6-9c5c-bf568afb6a9a, hbase.cluster.distributed=false 2024-11-13T00:27:39,707 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:35075-0x1010eb918400000, quorum=127.0.0.1:64321, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-13T00:27:39,711 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with 
threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=35075 2024-11-13T00:27:39,711 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=35075 2024-11-13T00:27:39,712 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=35075 2024-11-13T00:27:39,712 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=35075 2024-11-13T00:27:39,713 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=35075 2024-11-13T00:27:39,824 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/cb28580903fb:0 server-side Connection retries=45 2024-11-13T00:27:39,826 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-13T00:27:39,826 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-13T00:27:39,827 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-13T00:27:39,827 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-13T00:27:39,827 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-13T00:27:39,830 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-13T00:27:39,832 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-13T00:27:39,833 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:35769 2024-11-13T00:27:39,835 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:35769 connecting to ZooKeeper ensemble=127.0.0.1:64321 2024-11-13T00:27:39,836 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T00:27:39,839 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T00:27:39,845 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:357690x0, quorum=127.0.0.1:64321, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-13T00:27:39,845 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:357690x0, quorum=127.0.0.1:64321, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-13T00:27:39,846 DEBUG 
[zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:35769-0x1010eb918400001 connected 2024-11-13T00:27:39,849 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-13T00:27:39,857 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-13T00:27:39,859 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:35769-0x1010eb918400001, quorum=127.0.0.1:64321, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-13T00:27:39,864 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:35769-0x1010eb918400001, quorum=127.0.0.1:64321, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-13T00:27:39,865 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=35769 2024-11-13T00:27:39,867 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=35769 2024-11-13T00:27:39,868 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=35769 2024-11-13T00:27:39,869 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=35769 2024-11-13T00:27:39,872 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=35769 2024-11-13T00:27:39,887 DEBUG [M:0;cb28580903fb:35075 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;cb28580903fb:35075 2024-11-13T00:27:39,888 INFO [master/cb28580903fb:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/cb28580903fb,35075,1731457659104 2024-11-13T00:27:39,895 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35769-0x1010eb918400001, quorum=127.0.0.1:64321, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-13T00:27:39,895 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35075-0x1010eb918400000, quorum=127.0.0.1:64321, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-13T00:27:39,897 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:35075-0x1010eb918400000, quorum=127.0.0.1:64321, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/cb28580903fb,35075,1731457659104 2024-11-13T00:27:39,918 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35769-0x1010eb918400001, quorum=127.0.0.1:64321, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-13T00:27:39,918 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35075-0x1010eb918400000, quorum=127.0.0.1:64321, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T00:27:39,918 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35769-0x1010eb918400001, quorum=127.0.0.1:64321, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 
2024-11-13T00:27:39,918 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:35075-0x1010eb918400000, quorum=127.0.0.1:64321, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-13T00:27:39,920 INFO [master/cb28580903fb:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/cb28580903fb,35075,1731457659104 from backup master directory 2024-11-13T00:27:39,923 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35075-0x1010eb918400000, quorum=127.0.0.1:64321, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/cb28580903fb,35075,1731457659104 2024-11-13T00:27:39,923 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35769-0x1010eb918400001, quorum=127.0.0.1:64321, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-13T00:27:39,923 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35075-0x1010eb918400000, quorum=127.0.0.1:64321, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-13T00:27:39,924 WARN [master/cb28580903fb:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-13T00:27:39,924 INFO [master/cb28580903fb:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=cb28580903fb,35075,1731457659104 2024-11-13T00:27:39,926 INFO [master/cb28580903fb:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-11-13T00:27:39,927 INFO [master/cb28580903fb:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-11-13T00:27:39,983 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:36657/user/jenkins/test-data/90fad67b-44a6-07b6-9c5c-bf568afb6a9a/hbase.id] with ID: 3b6677a6-8a1a-4c84-ba4d-321a4e3cfafe 2024-11-13T00:27:39,984 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:36657/user/jenkins/test-data/90fad67b-44a6-07b6-9c5c-bf568afb6a9a/.tmp/hbase.id 2024-11-13T00:27:39,996 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34907 is added to blk_1073741826_1002 (size=42) 2024-11-13T00:27:39,996 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36357 is added to blk_1073741826_1002 (size=42) 2024-11-13T00:27:39,997 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:36657/user/jenkins/test-data/90fad67b-44a6-07b6-9c5c-bf568afb6a9a/.tmp/hbase.id]:[hdfs://localhost:36657/user/jenkins/test-data/90fad67b-44a6-07b6-9c5c-bf568afb6a9a/hbase.id] 2024-11-13T00:27:40,042 INFO [master/cb28580903fb:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T00:27:40,048 INFO [master/cb28580903fb:0:becomeActiveMaster {}] 
util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-13T00:27:40,067 INFO [master/cb28580903fb:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 17ms. 2024-11-13T00:27:40,071 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35075-0x1010eb918400000, quorum=127.0.0.1:64321, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T00:27:40,071 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35769-0x1010eb918400001, quorum=127.0.0.1:64321, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T00:27:40,086 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36357 is added to blk_1073741827_1003 (size=196) 2024-11-13T00:27:40,087 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34907 is added to blk_1073741827_1003 (size=196) 2024-11-13T00:27:40,103 INFO [master/cb28580903fb:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-13T00:27:40,106 INFO [master/cb28580903fb:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-13T00:27:40,112 INFO [master/cb28580903fb:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-13T00:27:40,144 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36357 is added to blk_1073741828_1004 (size=1189) 2024-11-13T00:27:40,145 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34907 is added to blk_1073741828_1004 (size=1189) 2024-11-13T00:27:40,162 INFO [master/cb28580903fb:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY 
=> ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:36657/user/jenkins/test-data/90fad67b-44a6-07b6-9c5c-bf568afb6a9a/MasterData/data/master/store 2024-11-13T00:27:40,182 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36357 is added to blk_1073741829_1005 (size=34) 2024-11-13T00:27:40,182 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34907 is added to blk_1073741829_1005 (size=34) 2024-11-13T00:27:40,188 INFO [master/cb28580903fb:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 2024-11-13T00:27:40,192 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T00:27:40,194 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-13T00:27:40,194 INFO [master/cb28580903fb:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-13T00:27:40,194 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-13T00:27:40,196 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-13T00:27:40,197 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-13T00:27:40,197 INFO [master/cb28580903fb:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-13T00:27:40,198 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731457660194Disabling compacts and flushes for region at 1731457660194Disabling writes for close at 1731457660196 (+2 ms)Writing region close event to WAL at 1731457660197 (+1 ms)Closed at 1731457660197 2024-11-13T00:27:40,201 WARN [master/cb28580903fb:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:36657/user/jenkins/test-data/90fad67b-44a6-07b6-9c5c-bf568afb6a9a/MasterData/data/master/store/.initializing 2024-11-13T00:27:40,201 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:36657/user/jenkins/test-data/90fad67b-44a6-07b6-9c5c-bf568afb6a9a/MasterData/WALs/cb28580903fb,35075,1731457659104 2024-11-13T00:27:40,223 INFO [master/cb28580903fb:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=cb28580903fb%2C35075%2C1731457659104, suffix=, logDir=hdfs://localhost:36657/user/jenkins/test-data/90fad67b-44a6-07b6-9c5c-bf568afb6a9a/MasterData/WALs/cb28580903fb,35075,1731457659104, archiveDir=hdfs://localhost:36657/user/jenkins/test-data/90fad67b-44a6-07b6-9c5c-bf568afb6a9a/MasterData/oldWALs, maxLogs=10 2024-11-13T00:27:40,232 INFO [master/cb28580903fb:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor cb28580903fb%2C35075%2C1731457659104.1731457660228 2024-11-13T00:27:40,252 INFO [master/cb28580903fb:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/90fad67b-44a6-07b6-9c5c-bf568afb6a9a/MasterData/WALs/cb28580903fb,35075,1731457659104/cb28580903fb%2C35075%2C1731457659104.1731457660228 2024-11-13T00:27:40,263 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39731:39731),(127.0.0.1/127.0.0.1:46287:46287)] 2024-11-13T00:27:40,264 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-13T00:27:40,264 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T00:27:40,267 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-13T00:27:40,268 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-13T00:27:40,310 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-13T00:27:40,339 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; 
off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-13T00:27:40,342 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T00:27:40,345 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T00:27:40,346 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-13T00:27:40,349 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-13T00:27:40,349 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T00:27:40,350 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T00:27:40,350 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-13T00:27:40,353 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: 
max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-13T00:27:40,353 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T00:27:40,354 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T00:27:40,354 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-13T00:27:40,356 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-13T00:27:40,356 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T00:27:40,357 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T00:27:40,358 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-13T00:27:40,361 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36657/user/jenkins/test-data/90fad67b-44a6-07b6-9c5c-bf568afb6a9a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-13T00:27:40,362 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36657/user/jenkins/test-data/90fad67b-44a6-07b6-9c5c-bf568afb6a9a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-13T00:27:40,367 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-13T00:27:40,367 DEBUG 
[master/cb28580903fb:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-13T00:27:40,370 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-13T00:27:40,373 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-13T00:27:40,378 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36657/user/jenkins/test-data/90fad67b-44a6-07b6-9c5c-bf568afb6a9a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-13T00:27:40,379 INFO [master/cb28580903fb:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=847961, jitterRate=0.07823887467384338}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-13T00:27:40,385 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731457660280Initializing all the Stores at 1731457660283 (+3 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731457660283Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731457660284 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731457660284Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731457660284Cleaning up temporary data from old regions at 1731457660367 (+83 ms)Region opened successfully at 1731457660385 (+18 ms) 2024-11-13T00:27:40,386 INFO [master/cb28580903fb:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-13T00:27:40,421 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3a0e08b4, compressor=null, 
tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=cb28580903fb/172.17.0.2:0 2024-11-13T00:27:40,453 INFO [master/cb28580903fb:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-13T00:27:40,464 INFO [master/cb28580903fb:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-13T00:27:40,465 INFO [master/cb28580903fb:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-13T00:27:40,468 INFO [master/cb28580903fb:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-13T00:27:40,469 INFO [master/cb28580903fb:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 1 msec 2024-11-13T00:27:40,474 INFO [master/cb28580903fb:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 4 msec 2024-11-13T00:27:40,474 INFO [master/cb28580903fb:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-13T00:27:40,499 INFO [master/cb28580903fb:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-13T00:27:40,507 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35075-0x1010eb918400000, quorum=127.0.0.1:64321, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-13T00:27:40,509 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-13T00:27:40,511 INFO [master/cb28580903fb:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-13T00:27:40,512 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35075-0x1010eb918400000, quorum=127.0.0.1:64321, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-13T00:27:40,514 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-13T00:27:40,516 INFO [master/cb28580903fb:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-13T00:27:40,519 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35075-0x1010eb918400000, quorum=127.0.0.1:64321, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-13T00:27:40,522 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-13T00:27:40,524 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35075-0x1010eb918400000, quorum=127.0.0.1:64321, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily 
an error) 2024-11-13T00:27:40,526 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-13T00:27:40,543 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35075-0x1010eb918400000, quorum=127.0.0.1:64321, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-13T00:27:40,545 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-13T00:27:40,549 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35075-0x1010eb918400000, quorum=127.0.0.1:64321, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-13T00:27:40,549 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35769-0x1010eb918400001, quorum=127.0.0.1:64321, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-13T00:27:40,549 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35075-0x1010eb918400000, quorum=127.0.0.1:64321, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T00:27:40,549 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35769-0x1010eb918400001, quorum=127.0.0.1:64321, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T00:27:40,552 INFO [master/cb28580903fb:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=cb28580903fb,35075,1731457659104, sessionid=0x1010eb918400000, setting cluster-up flag (Was=false) 2024-11-13T00:27:40,566 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35769-0x1010eb918400001, quorum=127.0.0.1:64321, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T00:27:40,566 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35075-0x1010eb918400000, quorum=127.0.0.1:64321, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T00:27:40,573 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-13T00:27:40,575 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=cb28580903fb,35075,1731457659104 2024-11-13T00:27:40,580 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35075-0x1010eb918400000, quorum=127.0.0.1:64321, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T00:27:40,580 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35769-0x1010eb918400001, quorum=127.0.0.1:64321, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T00:27:40,588 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-13T00:27:40,590 DEBUG 
[master/cb28580903fb:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=cb28580903fb,35075,1731457659104 2024-11-13T00:27:40,596 INFO [master/cb28580903fb:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:36657/user/jenkins/test-data/90fad67b-44a6-07b6-9c5c-bf568afb6a9a/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-13T00:27:40,670 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-13T00:27:40,676 INFO [RS:0;cb28580903fb:35769 {}] regionserver.HRegionServer(746): ClusterId : 3b6677a6-8a1a-4c84-ba4d-321a4e3cfafe 2024-11-13T00:27:40,679 DEBUG [RS:0;cb28580903fb:35769 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-13T00:27:40,680 INFO [master/cb28580903fb:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-13T00:27:40,685 DEBUG [RS:0;cb28580903fb:35769 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-13T00:27:40,685 DEBUG [RS:0;cb28580903fb:35769 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-13T00:27:40,687 INFO [master/cb28580903fb:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
2024-11-13T00:27:40,688 DEBUG [RS:0;cb28580903fb:35769 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-13T00:27:40,689 DEBUG [RS:0;cb28580903fb:35769 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1015e56e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=cb28580903fb/172.17.0.2:0 2024-11-13T00:27:40,693 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: cb28580903fb,35075,1731457659104 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-13T00:27:40,701 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/cb28580903fb:0, corePoolSize=5, maxPoolSize=5 2024-11-13T00:27:40,701 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/cb28580903fb:0, corePoolSize=5, maxPoolSize=5 2024-11-13T00:27:40,701 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/cb28580903fb:0, corePoolSize=5, maxPoolSize=5 2024-11-13T00:27:40,701 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/cb28580903fb:0, corePoolSize=5, maxPoolSize=5 2024-11-13T00:27:40,701 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/cb28580903fb:0, corePoolSize=10, maxPoolSize=10 2024-11-13T00:27:40,702 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/cb28580903fb:0, corePoolSize=1, maxPoolSize=1 2024-11-13T00:27:40,702 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/cb28580903fb:0, corePoolSize=2, maxPoolSize=2 2024-11-13T00:27:40,702 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/cb28580903fb:0, corePoolSize=1, maxPoolSize=1 2024-11-13T00:27:40,703 INFO [master/cb28580903fb:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731457690703 2024-11-13T00:27:40,704 DEBUG [RS:0;cb28580903fb:35769 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;cb28580903fb:35769 2024-11-13T00:27:40,706 INFO [master/cb28580903fb:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-13T00:27:40,707 INFO [master/cb28580903fb:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-13T00:27:40,707 INFO [RS:0;cb28580903fb:35769 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 
2024-11-13T00:27:40,707 INFO [RS:0;cb28580903fb:35769 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-13T00:27:40,707 DEBUG [RS:0;cb28580903fb:35769 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-13T00:27:40,708 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-13T00:27:40,708 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-13T00:27:40,710 INFO [RS:0;cb28580903fb:35769 {}] regionserver.HRegionServer(2659): reportForDuty to master=cb28580903fb,35075,1731457659104 with port=35769, startcode=1731457659784 2024-11-13T00:27:40,712 INFO [master/cb28580903fb:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-13T00:27:40,712 INFO [master/cb28580903fb:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-13T00:27:40,713 INFO [master/cb28580903fb:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-13T00:27:40,713 INFO [master/cb28580903fb:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-13T00:27:40,714 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T00:27:40,715 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-13T00:27:40,714 INFO [master/cb28580903fb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
2024-11-13T00:27:40,717 INFO [master/cb28580903fb:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-13T00:27:40,719 INFO [master/cb28580903fb:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-13T00:27:40,720 INFO [master/cb28580903fb:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-13T00:27:40,723 INFO [master/cb28580903fb:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-13T00:27:40,723 INFO [master/cb28580903fb:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-13T00:27:40,725 DEBUG [RS:0;cb28580903fb:35769 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-13T00:27:40,729 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/cb28580903fb:0:becomeActiveMaster-HFileCleaner.large.0-1731457660725,5,FailOnTimeoutGroup] 2024-11-13T00:27:40,732 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/cb28580903fb:0:becomeActiveMaster-HFileCleaner.small.0-1731457660730,5,FailOnTimeoutGroup] 2024-11-13T00:27:40,732 INFO [master/cb28580903fb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-13T00:27:40,733 INFO [master/cb28580903fb:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-13T00:27:40,734 INFO [master/cb28580903fb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-13T00:27:40,734 INFO [master/cb28580903fb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
2024-11-13T00:27:40,738 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34907 is added to blk_1073741831_1007 (size=1321) 2024-11-13T00:27:40,739 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36357 is added to blk_1073741831_1007 (size=1321) 2024-11-13T00:27:40,742 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:36657/user/jenkins/test-data/90fad67b-44a6-07b6-9c5c-bf568afb6a9a/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-13T00:27:40,742 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:36657/user/jenkins/test-data/90fad67b-44a6-07b6-9c5c-bf568afb6a9a 2024-11-13T00:27:40,759 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34907 is added to blk_1073741832_1008 (size=32) 2024-11-13T00:27:40,759 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36357 is added to blk_1073741832_1008 (size=32) 2024-11-13T00:27:40,761 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T00:27:40,764 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-13T00:27:40,767 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; 
off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-13T00:27:40,767 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T00:27:40,768 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T00:27:40,768 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-13T00:27:40,776 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-13T00:27:40,776 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T00:27:40,778 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T00:27:40,778 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-13T00:27:40,781 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-13T00:27:40,781 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T00:27:40,782 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T00:27:40,783 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-13T00:27:40,786 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-13T00:27:40,786 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T00:27:40,787 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T00:27:40,788 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-13T00:27:40,789 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36657/user/jenkins/test-data/90fad67b-44a6-07b6-9c5c-bf568afb6a9a/data/hbase/meta/1588230740 2024-11-13T00:27:40,790 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36657/user/jenkins/test-data/90fad67b-44a6-07b6-9c5c-bf568afb6a9a/data/hbase/meta/1588230740 2024-11-13T00:27:40,794 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-13T00:27:40,794 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-13T00:27:40,795 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-11-13T00:27:40,798 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-13T00:27:40,803 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36657/user/jenkins/test-data/90fad67b-44a6-07b6-9c5c-bf568afb6a9a/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-13T00:27:40,804 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=708141, jitterRate=-0.09955248236656189}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-13T00:27:40,805 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46951, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-11-13T00:27:40,808 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731457660761Initializing all the Stores at 1731457660763 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731457660763Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731457660763Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731457660764 (+1 ms)Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731457660764Cleaning up temporary data from old regions at 1731457660794 (+30 ms)Region opened successfully at 1731457660808 (+14 ms) 2024-11-13T00:27:40,810 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-13T00:27:40,811 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-13T00:27:40,811 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-13T00:27:40,811 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-13T00:27:40,811 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-13T00:27:40,812 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-13T00:27:40,813 
DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731457660810Disabling compacts and flushes for region at 1731457660810Disabling writes for close at 1731457660811 (+1 ms)Writing region close event to WAL at 1731457660812 (+1 ms)Closed at 1731457660812 2024-11-13T00:27:40,813 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35075 {}] master.ServerManager(363): Checking decommissioned status of RegionServer cb28580903fb,35769,1731457659784 2024-11-13T00:27:40,816 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35075 {}] master.ServerManager(517): Registering regionserver=cb28580903fb,35769,1731457659784 2024-11-13T00:27:40,817 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-13T00:27:40,817 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-13T00:27:40,826 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-13T00:27:40,832 DEBUG [RS:0;cb28580903fb:35769 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:36657/user/jenkins/test-data/90fad67b-44a6-07b6-9c5c-bf568afb6a9a 2024-11-13T00:27:40,832 DEBUG [RS:0;cb28580903fb:35769 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:36657 2024-11-13T00:27:40,832 DEBUG [RS:0;cb28580903fb:35769 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-13T00:27:40,837 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-13T00:27:40,839 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35075-0x1010eb918400000, quorum=127.0.0.1:64321, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-13T00:27:40,840 DEBUG [RS:0;cb28580903fb:35769 {}] zookeeper.ZKUtil(111): regionserver:35769-0x1010eb918400001, quorum=127.0.0.1:64321, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/cb28580903fb,35769,1731457659784 2024-11-13T00:27:40,840 WARN [RS:0;cb28580903fb:35769 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-13T00:27:40,840 INFO [RS:0;cb28580903fb:35769 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-13T00:27:40,841 DEBUG [RS:0;cb28580903fb:35769 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:36657/user/jenkins/test-data/90fad67b-44a6-07b6-9c5c-bf568afb6a9a/WALs/cb28580903fb,35769,1731457659784 2024-11-13T00:27:40,841 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-13T00:27:40,842 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [cb28580903fb,35769,1731457659784] 2024-11-13T00:27:40,875 INFO [RS:0;cb28580903fb:35769 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-13T00:27:40,892 INFO [RS:0;cb28580903fb:35769 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-13T00:27:40,897 INFO [RS:0;cb28580903fb:35769 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-13T00:27:40,898 INFO [RS:0;cb28580903fb:35769 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-13T00:27:40,898 INFO [RS:0;cb28580903fb:35769 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-13T00:27:40,904 INFO [RS:0;cb28580903fb:35769 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-13T00:27:40,906 INFO [RS:0;cb28580903fb:35769 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
2024-11-13T00:27:40,906 DEBUG [RS:0;cb28580903fb:35769 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/cb28580903fb:0, corePoolSize=1, maxPoolSize=1 2024-11-13T00:27:40,906 DEBUG [RS:0;cb28580903fb:35769 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/cb28580903fb:0, corePoolSize=1, maxPoolSize=1 2024-11-13T00:27:40,906 DEBUG [RS:0;cb28580903fb:35769 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/cb28580903fb:0, corePoolSize=1, maxPoolSize=1 2024-11-13T00:27:40,906 DEBUG [RS:0;cb28580903fb:35769 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/cb28580903fb:0, corePoolSize=1, maxPoolSize=1 2024-11-13T00:27:40,907 DEBUG [RS:0;cb28580903fb:35769 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/cb28580903fb:0, corePoolSize=1, maxPoolSize=1 2024-11-13T00:27:40,907 DEBUG [RS:0;cb28580903fb:35769 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/cb28580903fb:0, corePoolSize=2, maxPoolSize=2 2024-11-13T00:27:40,907 DEBUG [RS:0;cb28580903fb:35769 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/cb28580903fb:0, corePoolSize=1, maxPoolSize=1 2024-11-13T00:27:40,907 DEBUG [RS:0;cb28580903fb:35769 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/cb28580903fb:0, corePoolSize=1, maxPoolSize=1 2024-11-13T00:27:40,907 DEBUG [RS:0;cb28580903fb:35769 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/cb28580903fb:0, corePoolSize=1, maxPoolSize=1 2024-11-13T00:27:40,907 DEBUG [RS:0;cb28580903fb:35769 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/cb28580903fb:0, corePoolSize=1, maxPoolSize=1 2024-11-13T00:27:40,908 DEBUG [RS:0;cb28580903fb:35769 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/cb28580903fb:0, corePoolSize=1, maxPoolSize=1 2024-11-13T00:27:40,908 DEBUG [RS:0;cb28580903fb:35769 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/cb28580903fb:0, corePoolSize=1, maxPoolSize=1 2024-11-13T00:27:40,908 DEBUG [RS:0;cb28580903fb:35769 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/cb28580903fb:0, corePoolSize=3, maxPoolSize=3 2024-11-13T00:27:40,908 DEBUG [RS:0;cb28580903fb:35769 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/cb28580903fb:0, corePoolSize=3, maxPoolSize=3 2024-11-13T00:27:40,909 INFO [RS:0;cb28580903fb:35769 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-13T00:27:40,909 INFO [RS:0;cb28580903fb:35769 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-13T00:27:40,909 INFO [RS:0;cb28580903fb:35769 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-13T00:27:40,909 INFO [RS:0;cb28580903fb:35769 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 
2024-11-13T00:27:40,909 INFO [RS:0;cb28580903fb:35769 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-13T00:27:40,909 INFO [RS:0;cb28580903fb:35769 {}] hbase.ChoreService(168): Chore ScheduledChore name=cb28580903fb,35769,1731457659784-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-13T00:27:40,928 INFO [RS:0;cb28580903fb:35769 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-13T00:27:40,930 INFO [RS:0;cb28580903fb:35769 {}] hbase.ChoreService(168): Chore ScheduledChore name=cb28580903fb,35769,1731457659784-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-13T00:27:40,930 INFO [RS:0;cb28580903fb:35769 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-13T00:27:40,930 INFO [RS:0;cb28580903fb:35769 {}] regionserver.Replication(171): cb28580903fb,35769,1731457659784 started 2024-11-13T00:27:40,949 INFO [RS:0;cb28580903fb:35769 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-13T00:27:40,949 INFO [RS:0;cb28580903fb:35769 {}] regionserver.HRegionServer(1482): Serving as cb28580903fb,35769,1731457659784, RpcServer on cb28580903fb/172.17.0.2:35769, sessionid=0x1010eb918400001 2024-11-13T00:27:40,950 DEBUG [RS:0;cb28580903fb:35769 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-13T00:27:40,951 DEBUG [RS:0;cb28580903fb:35769 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager cb28580903fb,35769,1731457659784 2024-11-13T00:27:40,951 DEBUG [RS:0;cb28580903fb:35769 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'cb28580903fb,35769,1731457659784' 2024-11-13T00:27:40,951 DEBUG [RS:0;cb28580903fb:35769 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-13T00:27:40,952 DEBUG [RS:0;cb28580903fb:35769 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-13T00:27:40,953 DEBUG [RS:0;cb28580903fb:35769 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-13T00:27:40,953 DEBUG [RS:0;cb28580903fb:35769 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-13T00:27:40,953 DEBUG [RS:0;cb28580903fb:35769 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager cb28580903fb,35769,1731457659784 2024-11-13T00:27:40,953 DEBUG [RS:0;cb28580903fb:35769 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'cb28580903fb,35769,1731457659784' 2024-11-13T00:27:40,953 DEBUG [RS:0;cb28580903fb:35769 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-13T00:27:40,954 DEBUG [RS:0;cb28580903fb:35769 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-13T00:27:40,955 DEBUG [RS:0;cb28580903fb:35769 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-13T00:27:40,955 INFO [RS:0;cb28580903fb:35769 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-13T00:27:40,955 INFO [RS:0;cb28580903fb:35769 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support 
disabled, not starting space quota manager. 2024-11-13T00:27:40,992 WARN [cb28580903fb:35075 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 2024-11-13T00:27:41,063 INFO [RS:0;cb28580903fb:35769 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=cb28580903fb%2C35769%2C1731457659784, suffix=, logDir=hdfs://localhost:36657/user/jenkins/test-data/90fad67b-44a6-07b6-9c5c-bf568afb6a9a/WALs/cb28580903fb,35769,1731457659784, archiveDir=hdfs://localhost:36657/user/jenkins/test-data/90fad67b-44a6-07b6-9c5c-bf568afb6a9a/oldWALs, maxLogs=32 2024-11-13T00:27:41,066 INFO [RS:0;cb28580903fb:35769 {}] monitor.StreamSlowMonitor(122): New stream slow monitor cb28580903fb%2C35769%2C1731457659784.1731457661065 2024-11-13T00:27:41,075 INFO [RS:0;cb28580903fb:35769 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/90fad67b-44a6-07b6-9c5c-bf568afb6a9a/WALs/cb28580903fb,35769,1731457659784/cb28580903fb%2C35769%2C1731457659784.1731457661065 2024-11-13T00:27:41,076 DEBUG [RS:0;cb28580903fb:35769 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39731:39731),(127.0.0.1/127.0.0.1:46287:46287)] 2024-11-13T00:27:41,244 DEBUG [cb28580903fb:35075 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-13T00:27:41,256 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=cb28580903fb,35769,1731457659784 2024-11-13T00:27:41,263 INFO [PEWorker-4 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as cb28580903fb,35769,1731457659784, state=OPENING 2024-11-13T00:27:41,267 DEBUG [PEWorker-4 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-13T00:27:41,270 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35075-0x1010eb918400000, quorum=127.0.0.1:64321, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T00:27:41,271 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35769-0x1010eb918400001, quorum=127.0.0.1:64321, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T00:27:41,271 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-13T00:27:41,271 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-13T00:27:41,273 DEBUG [PEWorker-4 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-13T00:27:41,274 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=cb28580903fb,35769,1731457659784}] 2024-11-13T00:27:41,449 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-13T00:27:41,453 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46299, 
version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-13T00:27:41,464 INFO [RS_OPEN_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-13T00:27:41,465 INFO [RS_OPEN_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-13T00:27:41,468 INFO [RS_OPEN_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=cb28580903fb%2C35769%2C1731457659784.meta, suffix=.meta, logDir=hdfs://localhost:36657/user/jenkins/test-data/90fad67b-44a6-07b6-9c5c-bf568afb6a9a/WALs/cb28580903fb,35769,1731457659784, archiveDir=hdfs://localhost:36657/user/jenkins/test-data/90fad67b-44a6-07b6-9c5c-bf568afb6a9a/oldWALs, maxLogs=32 2024-11-13T00:27:41,470 INFO [RS_OPEN_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor cb28580903fb%2C35769%2C1731457659784.meta.1731457661470.meta 2024-11-13T00:27:41,477 INFO [RS_OPEN_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/90fad67b-44a6-07b6-9c5c-bf568afb6a9a/WALs/cb28580903fb,35769,1731457659784/cb28580903fb%2C35769%2C1731457659784.meta.1731457661470.meta 2024-11-13T00:27:41,480 DEBUG [RS_OPEN_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39731:39731),(127.0.0.1/127.0.0.1:46287:46287)] 2024-11-13T00:27:41,484 DEBUG [RS_OPEN_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-13T00:27:41,486 DEBUG [RS_OPEN_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-13T00:27:41,489 DEBUG [RS_OPEN_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-13T00:27:41,493 INFO [RS_OPEN_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
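The WAL lines above (blocksize=256 MB, rollsize=128 MB, maxLogs=32, FSHLogProvider) are driven by a handful of region server settings. A minimal sketch of the configuration keys that plausibly produce those values; the key names are standard HBase settings, but the mapping to this particular run is an assumption, not something the log states:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Sketch: WAL sizing knobs corresponding to the values logged above.
public class WalConfigSketch {
  public static Configuration walTuning() {
    Configuration conf = HBaseConfiguration.create();
    // WAL block size; this run logs 256 MB.
    conf.setLong("hbase.regionserver.hlog.blocksize", 256L * 1024 * 1024);
    // Roll size is blocksize * multiplier; 0.5 yields the 128 MB rollsize seen above.
    conf.setFloat("hbase.regionserver.logroll.multiplier", 0.5f);
    // Upper bound on un-archived WAL files per server (maxLogs=32 in the log).
    conf.setInt("hbase.regionserver.maxlogs", 32);
    // FSHLogProvider is selected by the "filesystem" WAL provider.
    conf.set("hbase.wal.provider", "filesystem");
    return conf;
  }
}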
2024-11-13T00:27:41,498 DEBUG [RS_OPEN_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-13T00:27:41,498 DEBUG [RS_OPEN_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T00:27:41,499 DEBUG [RS_OPEN_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-13T00:27:41,499 DEBUG [RS_OPEN_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-13T00:27:41,502 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-13T00:27:41,504 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-13T00:27:41,504 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T00:27:41,505 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T00:27:41,506 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-13T00:27:41,507 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-13T00:27:41,507 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T00:27:41,508 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T00:27:41,508 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-13T00:27:41,510 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-13T00:27:41,510 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T00:27:41,511 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T00:27:41,511 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-13T00:27:41,513 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-13T00:27:41,513 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T00:27:41,514 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
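The CompactionConfiguration line repeated for each hbase:meta column family (info, ns, rep_barrier, table) prints the effective values of a few well-known compaction settings. A sketch that maps the logged numbers back to those keys; the key-to-value mapping is inferred from stock HBase defaults, not confirmed by this log:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Sketch: settings behind the CompactionConfiguration values printed above.
public class CompactionConfigSketch {
  public static Configuration defaults() {
    Configuration conf = HBaseConfiguration.create();
    conf.setInt("hbase.hstore.compaction.min", 3);                 // minFilesToCompact:3
    conf.setInt("hbase.hstore.compaction.max", 10);                // maxFilesToCompact:10
    conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);          // ratio 1.200000
    conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);  // off-peak ratio 5.000000
    conf.setLong("hbase.hregion.majorcompaction", 604_800_000L);   // major period (7 days, in ms)
    conf.setFloat("hbase.hregion.majorcompaction.jitter", 0.5f);   // major jitter 0.500000
    conf.setLong("hbase.regionserver.thread.compaction.throttle", 2_684_354_560L); // throttle point
    return conf;
  }
}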
2024-11-13T00:27:41,514 DEBUG [RS_OPEN_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-13T00:27:41,516 DEBUG [RS_OPEN_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36657/user/jenkins/test-data/90fad67b-44a6-07b6-9c5c-bf568afb6a9a/data/hbase/meta/1588230740 2024-11-13T00:27:41,519 DEBUG [RS_OPEN_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36657/user/jenkins/test-data/90fad67b-44a6-07b6-9c5c-bf568afb6a9a/data/hbase/meta/1588230740 2024-11-13T00:27:41,521 DEBUG [RS_OPEN_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-13T00:27:41,522 DEBUG [RS_OPEN_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-13T00:27:41,523 DEBUG [RS_OPEN_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-13T00:27:41,526 DEBUG [RS_OPEN_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-13T00:27:41,528 INFO [RS_OPEN_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=862666, jitterRate=0.09693720936775208}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-13T00:27:41,528 DEBUG [RS_OPEN_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-13T00:27:41,530 DEBUG [RS_OPEN_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731457661500Writing region info on filesystem at 1731457661500Initializing all the Stores at 1731457661501 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731457661502 (+1 ms)Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731457661502Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731457661502Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731457661502Cleaning up temporary data from old regions at 1731457661522 (+20 ms)Running coprocessor post-open hooks at 1731457661528 (+6 ms)Region opened successfully at 1731457661530 (+2 ms) 2024-11-13T00:27:41,539 INFO [RS_OPEN_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731457661440 2024-11-13T00:27:41,552 DEBUG [RS_OPEN_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-13T00:27:41,553 INFO [RS_OPEN_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-13T00:27:41,555 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=cb28580903fb,35769,1731457659784 2024-11-13T00:27:41,557 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as cb28580903fb,35769,1731457659784, state=OPEN 2024-11-13T00:27:41,564 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35769-0x1010eb918400001, quorum=127.0.0.1:64321, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-13T00:27:41,564 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35075-0x1010eb918400000, quorum=127.0.0.1:64321, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-13T00:27:41,564 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-13T00:27:41,564 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-13T00:27:41,565 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=cb28580903fb,35769,1731457659784 2024-11-13T00:27:41,570 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-13T00:27:41,570 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=cb28580903fb,35769,1731457659784 in 291 msec 2024-11-13T00:27:41,576 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-13T00:27:41,576 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 746 msec 2024-11-13T00:27:41,578 DEBUG [PEWorker-2 {}] 
procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-13T00:27:41,578 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-13T00:27:41,604 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-13T00:27:41,605 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=cb28580903fb,35769,1731457659784, seqNum=-1] 2024-11-13T00:27:41,633 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-13T00:27:41,635 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49427, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-13T00:27:41,657 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 1.0310 sec 2024-11-13T00:27:41,657 INFO [master/cb28580903fb:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731457661657, completionTime=-1 2024-11-13T00:27:41,660 INFO [master/cb28580903fb:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-13T00:27:41,660 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-13T00:27:41,688 INFO [master/cb28580903fb:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-13T00:27:41,688 INFO [master/cb28580903fb:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731457721688 2024-11-13T00:27:41,688 INFO [master/cb28580903fb:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731457781688 2024-11-13T00:27:41,688 INFO [master/cb28580903fb:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 27 msec 2024-11-13T00:27:41,691 INFO [master/cb28580903fb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=cb28580903fb,35075,1731457659104-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-13T00:27:41,691 INFO [master/cb28580903fb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=cb28580903fb,35075,1731457659104-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-13T00:27:41,691 INFO [master/cb28580903fb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=cb28580903fb,35075,1731457659104-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-13T00:27:41,693 INFO [master/cb28580903fb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-cb28580903fb:35075, period=300000, unit=MILLISECONDS is enabled. 
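InitMetaProcedure above creates the two built-in namespaces, 'default' and 'hbase'. A minimal client-side sketch that verifies this with the standard Admin API; the connection setup is generic and not this test's own wiring:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

// Sketch: list the namespaces InitMetaProcedure just created.
public class ListNamespacesSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      for (NamespaceDescriptor ns : admin.listNamespaceDescriptors()) {
        System.out.println("namespace: " + ns.getName());  // expect: default, hbase
      }
    }
  }
}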
2024-11-13T00:27:41,693 INFO [master/cb28580903fb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-13T00:27:41,694 INFO [master/cb28580903fb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-13T00:27:41,699 DEBUG [master/cb28580903fb:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-13T00:27:41,719 INFO [master/cb28580903fb:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.795sec 2024-11-13T00:27:41,720 INFO [master/cb28580903fb:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-13T00:27:41,722 INFO [master/cb28580903fb:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-13T00:27:41,723 INFO [master/cb28580903fb:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-13T00:27:41,723 INFO [master/cb28580903fb:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-13T00:27:41,723 INFO [master/cb28580903fb:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-13T00:27:41,724 INFO [master/cb28580903fb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=cb28580903fb,35075,1731457659104-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-13T00:27:41,725 INFO [master/cb28580903fb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=cb28580903fb,35075,1731457659104-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-13T00:27:41,732 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-13T00:27:41,733 INFO [master/cb28580903fb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-13T00:27:41,734 INFO [master/cb28580903fb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=cb28580903fb,35075,1731457659104-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
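Each "Chore ScheduledChore name=..., period=..., unit=... is enabled" message above is a periodic task registered with a ChoreService. A rough sketch of that pattern, assuming the ScheduledChore/ChoreService constructors used here; the chore name and period are made up for illustration:

import java.util.concurrent.TimeUnit;
import org.apache.hadoop.hbase.ChoreService;
import org.apache.hadoop.hbase.ScheduledChore;
import org.apache.hadoop.hbase.Stoppable;

// Sketch: a custom chore scheduled the same way the master/RS chores above are.
public class ChoreSketch {
  public static void main(String[] args) {
    Stoppable stopper = new Stoppable() {
      private volatile boolean stopped;
      @Override public void stop(String why) { stopped = true; }
      @Override public boolean isStopped() { return stopped; }
    };
    ChoreService service = new ChoreService("example");
    ScheduledChore chore = new ScheduledChore("ExampleChore", stopper, 60, 0, TimeUnit.SECONDS) {
      @Override protected void chore() {
        System.out.println("periodic work runs here");
      }
    };
    service.scheduleChore(chore);  // logged by ChoreService as "... is enabled"
    // ... later: stopper.stop("done"); service.shutdown();
  }
}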
2024-11-13T00:27:41,787 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@18b51bbe, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T00:27:41,789 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-11-13T00:27:41,789 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-11-13T00:27:41,793 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request cb28580903fb,35075,-1 for getting cluster id 2024-11-13T00:27:41,796 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-13T00:27:41,804 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '3b6677a6-8a1a-4c84-ba4d-321a4e3cfafe' 2024-11-13T00:27:41,807 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-13T00:27:41,808 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "3b6677a6-8a1a-4c84-ba4d-321a4e3cfafe" 2024-11-13T00:27:41,810 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4e3fdc6f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T00:27:41,810 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [cb28580903fb,35075,-1] 2024-11-13T00:27:41,813 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-13T00:27:41,815 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T00:27:41,817 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50876, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-13T00:27:41,820 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@543c1b08, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T00:27:41,821 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-13T00:27:41,829 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=cb28580903fb,35769,1731457659784, seqNum=-1] 2024-11-13T00:27:41,829 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-13T00:27:41,832 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38424, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-13T00:27:41,854 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): 
Minicluster is up; activeMaster=cb28580903fb,35075,1731457659104 2024-11-13T00:27:41,854 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T00:27:41,862 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-13T00:27:41,866 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-13T00:27:41,872 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.AsyncConnectionImpl(321): The fetched master address is cb28580903fb,35075,1731457659104 2024-11-13T00:27:41,893 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@1329d828 2024-11-13T00:27:41,894 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-13T00:27:41,897 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50888, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-13T00:27:41,899 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35075 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-13T00:27:41,899 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35075 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
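The TableDescriptorChecker warnings above (MAX_FILESIZE=786432, MEMSTORE_FLUSHSIZE=8192) come from the test deliberately shrinking these limits so flushes and log rolls happen quickly. The test's actual setup is not shown in the log; a generic HBaseTestingUtil sketch that would produce the same warnings:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseTestingUtil;

// Sketch: a mini cluster with deliberately tiny region/flush sizes.
public class MiniClusterSketch {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtil util = new HBaseTestingUtil();
    Configuration conf = util.getConfiguration();
    conf.setLong("hbase.hregion.max.filesize", 786432L);       // triggers the "too small" MAX_FILESIZE warning
    conf.setLong("hbase.hregion.memstore.flush.size", 8192L);  // forces very frequent flushes
    util.startMiniCluster();
    try {
      // ... run the log-rolling scenario against util.getConnection() ...
    } finally {
      util.shutdownMiniCluster();
    }
  }
}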
2024-11-13T00:27:41,903 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35075 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testSlowSyncLogRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-13T00:27:41,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35075 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling 2024-11-13T00:27:41,916 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-11-13T00:27:41,918 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35075 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testSlowSyncLogRolling" procId is: 4 2024-11-13T00:27:41,918 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T00:27:41,921 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-13T00:27:41,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35075 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-13T00:27:41,968 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36357 is added to blk_1073741835_1011 (size=389) 2024-11-13T00:27:41,969 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34907 is added to blk_1073741835_1011 (size=389) 2024-11-13T00:27:41,972 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => a84351998556cbd83db95e9b5415ada0, NAME => 'TestLogRolling-testSlowSyncLogRolling,,1731457661899.a84351998556cbd83db95e9b5415ada0.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testSlowSyncLogRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:36657/user/jenkins/test-data/90fad67b-44a6-07b6-9c5c-bf568afb6a9a 2024-11-13T00:27:41,981 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34907 is added to blk_1073741836_1012 (size=72) 2024-11-13T00:27:41,981 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36357 is added to blk_1073741836_1012 (size=72) 2024-11-13T00:27:41,982 DEBUG 
[RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testSlowSyncLogRolling,,1731457661899.a84351998556cbd83db95e9b5415ada0.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T00:27:41,982 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1722): Closing a84351998556cbd83db95e9b5415ada0, disabling compactions & flushes 2024-11-13T00:27:41,983 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testSlowSyncLogRolling,,1731457661899.a84351998556cbd83db95e9b5415ada0. 2024-11-13T00:27:41,983 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testSlowSyncLogRolling,,1731457661899.a84351998556cbd83db95e9b5415ada0. 2024-11-13T00:27:41,983 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testSlowSyncLogRolling,,1731457661899.a84351998556cbd83db95e9b5415ada0. after waiting 0 ms 2024-11-13T00:27:41,983 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testSlowSyncLogRolling,,1731457661899.a84351998556cbd83db95e9b5415ada0. 2024-11-13T00:27:41,983 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testSlowSyncLogRolling,,1731457661899.a84351998556cbd83db95e9b5415ada0. 2024-11-13T00:27:41,983 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for a84351998556cbd83db95e9b5415ada0: Waiting for close lock at 1731457661982Disabling compacts and flushes for region at 1731457661982Disabling writes for close at 1731457661983 (+1 ms)Writing region close event to WAL at 1731457661983Closed at 1731457661983 2024-11-13T00:27:41,985 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_ADD_TO_META 2024-11-13T00:27:41,990 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testSlowSyncLogRolling,,1731457661899.a84351998556cbd83db95e9b5415ada0.","families":{"info":[{"qualifier":"regioninfo","vlen":71,"tag":[],"timestamp":"1731457661985"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731457661985"}]},"ts":"1731457661985"} 2024-11-13T00:27:41,995 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
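The CreateTableProcedure above is started by a client call. A sketch of the Admin call that mirrors the descriptor printed in the log for the 'info' family (VERSIONS=1, BLOOMFILTER=ROW, BLOCKSIZE=64KB); the connection setup is generic:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

// Sketch: create the table whose procedure (pid=4) is running above.
public class CreateTableSketch {
  public static void main(String[] args) throws Exception {
    TableName name = TableName.valueOf("TestLogRolling-testSlowSyncLogRolling");
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      admin.createTable(TableDescriptorBuilder.newBuilder(name)
          .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
              .setMaxVersions(1)
              .setBloomFilterType(BloomType.ROW)
              .setBlocksize(64 * 1024)
              .build())
          .build());
      // The master stores the procedure and the client then polls it, which shows up
      // in the log as "Checking to see if procedure is done pid=4".
    }
  }
}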
2024-11-13T00:27:41,997 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-13T00:27:41,999 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testSlowSyncLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731457661997"}]},"ts":"1731457661997"} 2024-11-13T00:27:42,004 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testSlowSyncLogRolling, state=ENABLING in hbase:meta 2024-11-13T00:27:42,005 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=a84351998556cbd83db95e9b5415ada0, ASSIGN}] 2024-11-13T00:27:42,007 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=a84351998556cbd83db95e9b5415ada0, ASSIGN 2024-11-13T00:27:42,009 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=a84351998556cbd83db95e9b5415ada0, ASSIGN; state=OFFLINE, location=cb28580903fb,35769,1731457659784; forceNewPlan=false, retain=false 2024-11-13T00:27:42,161 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=a84351998556cbd83db95e9b5415ada0, regionState=OPENING, regionLocation=cb28580903fb,35769,1731457659784 2024-11-13T00:27:42,165 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=a84351998556cbd83db95e9b5415ada0, ASSIGN because future has completed 2024-11-13T00:27:42,166 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure a84351998556cbd83db95e9b5415ada0, server=cb28580903fb,35769,1731457659784}] 2024-11-13T00:27:42,327 INFO [RS_OPEN_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testSlowSyncLogRolling,,1731457661899.a84351998556cbd83db95e9b5415ada0. 
2024-11-13T00:27:42,327 DEBUG [RS_OPEN_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => a84351998556cbd83db95e9b5415ada0, NAME => 'TestLogRolling-testSlowSyncLogRolling,,1731457661899.a84351998556cbd83db95e9b5415ada0.', STARTKEY => '', ENDKEY => ''} 2024-11-13T00:27:42,328 DEBUG [RS_OPEN_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testSlowSyncLogRolling a84351998556cbd83db95e9b5415ada0 2024-11-13T00:27:42,328 DEBUG [RS_OPEN_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testSlowSyncLogRolling,,1731457661899.a84351998556cbd83db95e9b5415ada0.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T00:27:42,328 DEBUG [RS_OPEN_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for a84351998556cbd83db95e9b5415ada0 2024-11-13T00:27:42,328 DEBUG [RS_OPEN_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for a84351998556cbd83db95e9b5415ada0 2024-11-13T00:27:42,330 INFO [StoreOpener-a84351998556cbd83db95e9b5415ada0-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region a84351998556cbd83db95e9b5415ada0 2024-11-13T00:27:42,333 INFO [StoreOpener-a84351998556cbd83db95e9b5415ada0-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region a84351998556cbd83db95e9b5415ada0 columnFamilyName info 2024-11-13T00:27:42,333 DEBUG [StoreOpener-a84351998556cbd83db95e9b5415ada0-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T00:27:42,334 INFO [StoreOpener-a84351998556cbd83db95e9b5415ada0-1 {}] regionserver.HStore(327): Store=a84351998556cbd83db95e9b5415ada0/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T00:27:42,334 DEBUG [RS_OPEN_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for a84351998556cbd83db95e9b5415ada0 2024-11-13T00:27:42,335 DEBUG [RS_OPEN_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:36657/user/jenkins/test-data/90fad67b-44a6-07b6-9c5c-bf568afb6a9a/data/default/TestLogRolling-testSlowSyncLogRolling/a84351998556cbd83db95e9b5415ada0 2024-11-13T00:27:42,336 DEBUG [RS_OPEN_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36657/user/jenkins/test-data/90fad67b-44a6-07b6-9c5c-bf568afb6a9a/data/default/TestLogRolling-testSlowSyncLogRolling/a84351998556cbd83db95e9b5415ada0 2024-11-13T00:27:42,337 DEBUG [RS_OPEN_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for a84351998556cbd83db95e9b5415ada0 2024-11-13T00:27:42,337 DEBUG [RS_OPEN_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for a84351998556cbd83db95e9b5415ada0 2024-11-13T00:27:42,340 DEBUG [RS_OPEN_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for a84351998556cbd83db95e9b5415ada0 2024-11-13T00:27:42,343 DEBUG [RS_OPEN_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36657/user/jenkins/test-data/90fad67b-44a6-07b6-9c5c-bf568afb6a9a/data/default/TestLogRolling-testSlowSyncLogRolling/a84351998556cbd83db95e9b5415ada0/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-13T00:27:42,344 INFO [RS_OPEN_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened a84351998556cbd83db95e9b5415ada0; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=788355, jitterRate=0.0024458765983581543}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-13T00:27:42,344 DEBUG [RS_OPEN_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for a84351998556cbd83db95e9b5415ada0 2024-11-13T00:27:42,345 DEBUG [RS_OPEN_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for a84351998556cbd83db95e9b5415ada0: Running coprocessor pre-open hook at 1731457662328Writing region info on filesystem at 1731457662328Initializing all the Stores at 1731457662330 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731457662330Cleaning up temporary data from old regions at 1731457662337 (+7 ms)Running coprocessor post-open hooks at 1731457662344 (+7 ms)Region opened successfully at 1731457662345 (+1 ms) 2024-11-13T00:27:42,347 INFO [RS_OPEN_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testSlowSyncLogRolling,,1731457661899.a84351998556cbd83db95e9b5415ada0., pid=6, masterSystemTime=1731457662320 2024-11-13T00:27:42,350 DEBUG [RS_OPEN_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for 
TestLogRolling-testSlowSyncLogRolling,,1731457661899.a84351998556cbd83db95e9b5415ada0. 2024-11-13T00:27:42,350 INFO [RS_OPEN_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testSlowSyncLogRolling,,1731457661899.a84351998556cbd83db95e9b5415ada0. 2024-11-13T00:27:42,351 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=a84351998556cbd83db95e9b5415ada0, regionState=OPEN, openSeqNum=2, regionLocation=cb28580903fb,35769,1731457659784 2024-11-13T00:27:42,355 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure a84351998556cbd83db95e9b5415ada0, server=cb28580903fb,35769,1731457659784 because future has completed 2024-11-13T00:27:42,362 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-13T00:27:42,362 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure a84351998556cbd83db95e9b5415ada0, server=cb28580903fb,35769,1731457659784 in 191 msec 2024-11-13T00:27:42,366 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-13T00:27:42,366 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=a84351998556cbd83db95e9b5415ada0, ASSIGN in 357 msec 2024-11-13T00:27:42,368 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-13T00:27:42,369 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testSlowSyncLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731457662368"}]},"ts":"1731457662368"} 2024-11-13T00:27:42,373 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testSlowSyncLogRolling, state=ENABLED in hbase:meta 2024-11-13T00:27:42,374 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_POST_OPERATION 2024-11-13T00:27:42,378 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling in 468 msec 2024-11-13T00:27:47,001 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-13T00:27:47,044 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-13T00:27:47,046 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testSlowSyncLogRolling' 2024-11-13T00:27:49,509 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-13T00:27:49,510 INFO [HBase-Metrics2-1 {}] 
impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-13T00:27:49,511 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling 2024-11-13T00:27:49,511 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling Metrics about Tables on a single HBase RegionServer 2024-11-13T00:27:49,512 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-13T00:27:49,512 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-13T00:27:49,513 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-13T00:27:49,513 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-11-13T00:27:51,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35075 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-13T00:27:51,966 INFO [RPCClient-NioEventLoopGroup-4-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testSlowSyncLogRolling completed 2024-11-13T00:27:51,969 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testSlowSyncLogRolling,, stopping at row=TestLogRolling-testSlowSyncLogRolling ,, for max=2147483647 with caching=100 2024-11-13T00:27:51,975 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testSlowSyncLogRolling 2024-11-13T00:27:51,976 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testSlowSyncLogRolling,,1731457661899.a84351998556cbd83db95e9b5415ada0. 
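The "Found 1 regions for table" / firstRegionName lines above come from a region-location lookup. A minimal client-side equivalent using the standard RegionLocator API (generic code, not the test utility's own implementation):

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;

// Sketch: list the regions (here, a single one) of the test table and where they are hosted.
public class RegionLookupSketch {
  public static void main(String[] args) throws Exception {
    TableName name = TableName.valueOf("TestLogRolling-testSlowSyncLogRolling");
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         RegionLocator locator = conn.getRegionLocator(name)) {
      for (HRegionLocation loc : locator.getAllRegionLocations()) {
        System.out.println(loc.getRegion().getRegionNameAsString() + " on " + loc.getServerName());
      }
    }
  }
}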
2024-11-13T00:27:51,977 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor cb28580903fb%2C35769%2C1731457659784.1731457671976 2024-11-13T00:27:51,985 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:27:51,985 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:27:51,985 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:27:51,985 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:27:51,985 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:27:51,986 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/90fad67b-44a6-07b6-9c5c-bf568afb6a9a/WALs/cb28580903fb,35769,1731457659784/cb28580903fb%2C35769%2C1731457659784.1731457661065 with entries=1, filesize=443 B; new WAL /user/jenkins/test-data/90fad67b-44a6-07b6-9c5c-bf568afb6a9a/WALs/cb28580903fb,35769,1731457659784/cb28580903fb%2C35769%2C1731457659784.1731457671976 2024-11-13T00:27:51,987 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46287:46287),(127.0.0.1/127.0.0.1:39731:39731)] 2024-11-13T00:27:51,987 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:36657/user/jenkins/test-data/90fad67b-44a6-07b6-9c5c-bf568afb6a9a/WALs/cb28580903fb,35769,1731457659784/cb28580903fb%2C35769%2C1731457659784.1731457661065 is not closed yet, will try archiving it next time 2024-11-13T00:27:51,988 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36357 is added to blk_1073741833_1009 (size=451) 2024-11-13T00:27:51,989 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34907 is added to blk_1073741833_1009 (size=451) 2024-11-13T00:27:51,990 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:36657/user/jenkins/test-data/90fad67b-44a6-07b6-9c5c-bf568afb6a9a/WALs/cb28580903fb,35769,1731457659784/cb28580903fb%2C35769%2C1731457659784.1731457661065 to hdfs://localhost:36657/user/jenkins/test-data/90fad67b-44a6-07b6-9c5c-bf568afb6a9a/oldWALs/cb28580903fb%2C35769%2C1731457659784.1731457661065 2024-11-13T00:27:51,996 DEBUG [RPCClient-NioEventLoopGroup-4-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testSlowSyncLogRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testSlowSyncLogRolling,,1731457661899.a84351998556cbd83db95e9b5415ada0., hostname=cb28580903fb,35769,1731457659784, seqNum=2] 2024-11-13T00:28:04,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35769 {}] regionserver.HRegion(8855): Flush requested on a84351998556cbd83db95e9b5415ada0 2024-11-13T00:28:04,031 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing a84351998556cbd83db95e9b5415ada0 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-13T00:28:04,091 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36657/user/jenkins/test-data/90fad67b-44a6-07b6-9c5c-bf568afb6a9a/data/default/TestLogRolling-testSlowSyncLogRolling/a84351998556cbd83db95e9b5415ada0/.tmp/info/4b1e38d582104bf7b8974bc127ae34e7 is 1080, key is row0001/info:/1731457671999/Put/seqid=0 2024-11-13T00:28:04,104 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36357 is added to blk_1073741838_1014 (size=12509) 2024-11-13T00:28:04,104 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34907 is added to blk_1073741838_1014 (size=12509) 2024-11-13T00:28:04,105 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:36657/user/jenkins/test-data/90fad67b-44a6-07b6-9c5c-bf568afb6a9a/data/default/TestLogRolling-testSlowSyncLogRolling/a84351998556cbd83db95e9b5415ada0/.tmp/info/4b1e38d582104bf7b8974bc127ae34e7 2024-11-13T00:28:04,152 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36657/user/jenkins/test-data/90fad67b-44a6-07b6-9c5c-bf568afb6a9a/data/default/TestLogRolling-testSlowSyncLogRolling/a84351998556cbd83db95e9b5415ada0/.tmp/info/4b1e38d582104bf7b8974bc127ae34e7 as hdfs://localhost:36657/user/jenkins/test-data/90fad67b-44a6-07b6-9c5c-bf568afb6a9a/data/default/TestLogRolling-testSlowSyncLogRolling/a84351998556cbd83db95e9b5415ada0/info/4b1e38d582104bf7b8974bc127ae34e7 2024-11-13T00:28:04,164 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36657/user/jenkins/test-data/90fad67b-44a6-07b6-9c5c-bf568afb6a9a/data/default/TestLogRolling-testSlowSyncLogRolling/a84351998556cbd83db95e9b5415ada0/info/4b1e38d582104bf7b8974bc127ae34e7, entries=7, sequenceid=11, filesize=12.2 K 2024-11-13T00:28:04,172 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for a84351998556cbd83db95e9b5415ada0 in 140ms, sequenceid=11, compaction requested=false 2024-11-13T00:28:04,173 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for a84351998556cbd83db95e9b5415ada0: 2024-11-13T00:28:08,226 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
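The flush above moves ~7.36 KB from the single info family into a 12.2 K HFile with 7 entries at sequenceid=11, completing in 140 ms. A rough client-side equivalent of that load is sketched below: seven ~1 KB puts (the log's biggest cell is 1080 bytes, with an empty qualifier) followed by an explicit Admin flush. The class and method names are invented for illustration; only the put and flush calls themselves are standard API.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public final class WriteAndFlushSketch {
        // Writes seven ~1 KB cells and then asks the region server to flush,
        // which is the kind of operation that produces the 12.2 K HFile seen above.
        static void writeAndFlush(Connection conn) throws Exception {
            TableName name = TableName.valueOf("TestLogRolling-testSlowSyncLogRolling");
            byte[] family = Bytes.toBytes("info");
            byte[] value = new byte[1024];
            try (Table table = conn.getTable(name); Admin admin = conn.getAdmin()) {
                for (int i = 1; i <= 7; i++) {
                    Put put = new Put(Bytes.toBytes(String.format("row%04d", i)));
                    put.addColumn(family, Bytes.toBytes(""), value); // empty qualifier, as in "row0001/info:/..."
                    table.put(put);
                }
                admin.flush(name); // server-side memstore flush, the work MemStoreFlusher performs here
            }
        }
    }

Repeating a loop like this against the table is what drives the flush, roll and compaction cycle recorded through the rest of the log.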
2024-11-13T00:28:12,041 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor cb28580903fb%2C35769%2C1731457659784.1731457692041 2024-11-13T00:28:12,250 INFO [Time-limited test {}] wal.AbstractFSWAL(1368): Slow sync cost: 205 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:36357,DS-eb6af8ad-fdb4-43dd-9bce-4e752d43af71,DISK], DatanodeInfoWithStorage[127.0.0.1:34907,DS-0fba3322-3b05-4261-a75d-fbd4306bb1bf,DISK]] 2024-11-13T00:28:12,250 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:28:12,250 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:28:12,250 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:28:12,250 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:28:12,250 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:28:12,251 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/90fad67b-44a6-07b6-9c5c-bf568afb6a9a/WALs/cb28580903fb,35769,1731457659784/cb28580903fb%2C35769%2C1731457659784.1731457671976 with entries=12, filesize=12.10 KB; new WAL /user/jenkins/test-data/90fad67b-44a6-07b6-9c5c-bf568afb6a9a/WALs/cb28580903fb,35769,1731457659784/cb28580903fb%2C35769%2C1731457659784.1731457692041 2024-11-13T00:28:12,252 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46287:46287),(127.0.0.1/127.0.0.1:39731:39731)] 2024-11-13T00:28:12,252 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:36657/user/jenkins/test-data/90fad67b-44a6-07b6-9c5c-bf568afb6a9a/WALs/cb28580903fb,35769,1731457659784/cb28580903fb%2C35769%2C1731457659784.1731457671976 is not closed yet, will try archiving it next time 2024-11-13T00:28:12,254 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34907 is added to blk_1073741837_1013 (size=12399) 2024-11-13T00:28:12,254 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36357 is added to blk_1073741837_1013 (size=12399) 2024-11-13T00:28:12,455 INFO [FSHLog-0-hdfs://localhost:36657/user/jenkins/test-data/90fad67b-44a6-07b6-9c5c-bf568afb6a9a-prefix:cb28580903fb,35769,1731457659784 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:36357,DS-eb6af8ad-fdb4-43dd-9bce-4e752d43af71,DISK], DatanodeInfoWithStorage[127.0.0.1:34907,DS-0fba3322-3b05-4261-a75d-fbd4306bb1bf,DISK]] 2024-11-13T00:28:14,659 INFO [FSHLog-0-hdfs://localhost:36657/user/jenkins/test-data/90fad67b-44a6-07b6-9c5c-bf568afb6a9a-prefix:cb28580903fb,35769,1731457659784 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:36357,DS-eb6af8ad-fdb4-43dd-9bce-4e752d43af71,DISK], DatanodeInfoWithStorage[127.0.0.1:34907,DS-0fba3322-3b05-4261-a75d-fbd4306bb1bf,DISK]] 2024-11-13T00:28:16,863 INFO [FSHLog-0-hdfs://localhost:36657/user/jenkins/test-data/90fad67b-44a6-07b6-9c5c-bf568afb6a9a-prefix:cb28580903fb,35769,1731457659784 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:36357,DS-eb6af8ad-fdb4-43dd-9bce-4e752d43af71,DISK], DatanodeInfoWithStorage[127.0.0.1:34907,DS-0fba3322-3b05-4261-a75d-fbd4306bb1bf,DISK]] 2024-11-13T00:28:19,068 INFO [FSHLog-0-hdfs://localhost:36657/user/jenkins/test-data/90fad67b-44a6-07b6-9c5c-bf568afb6a9a-prefix:cb28580903fb,35769,1731457659784 {}] wal.AbstractFSWAL(1368): Slow 
sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:36357,DS-eb6af8ad-fdb4-43dd-9bce-4e752d43af71,DISK], DatanodeInfoWithStorage[127.0.0.1:34907,DS-0fba3322-3b05-4261-a75d-fbd4306bb1bf,DISK]] 2024-11-13T00:28:19,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35769 {}] regionserver.HRegion(8855): Flush requested on a84351998556cbd83db95e9b5415ada0 2024-11-13T00:28:19,068 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing a84351998556cbd83db95e9b5415ada0 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-13T00:28:19,270 INFO [FSHLog-0-hdfs://localhost:36657/user/jenkins/test-data/90fad67b-44a6-07b6-9c5c-bf568afb6a9a-prefix:cb28580903fb,35769,1731457659784 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:36357,DS-eb6af8ad-fdb4-43dd-9bce-4e752d43af71,DISK], DatanodeInfoWithStorage[127.0.0.1:34907,DS-0fba3322-3b05-4261-a75d-fbd4306bb1bf,DISK]] 2024-11-13T00:28:19,277 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36657/user/jenkins/test-data/90fad67b-44a6-07b6-9c5c-bf568afb6a9a/data/default/TestLogRolling-testSlowSyncLogRolling/a84351998556cbd83db95e9b5415ada0/.tmp/info/470644ae8d3e46f199ee1f11e8b3580e is 1080, key is row0008/info:/1731457686030/Put/seqid=0 2024-11-13T00:28:19,285 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36357 is added to blk_1073741840_1016 (size=12509) 2024-11-13T00:28:19,285 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34907 is added to blk_1073741840_1016 (size=12509) 2024-11-13T00:28:19,286 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=21 (bloomFilter=true), to=hdfs://localhost:36657/user/jenkins/test-data/90fad67b-44a6-07b6-9c5c-bf568afb6a9a/data/default/TestLogRolling-testSlowSyncLogRolling/a84351998556cbd83db95e9b5415ada0/.tmp/info/470644ae8d3e46f199ee1f11e8b3580e 2024-11-13T00:28:19,295 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36657/user/jenkins/test-data/90fad67b-44a6-07b6-9c5c-bf568afb6a9a/data/default/TestLogRolling-testSlowSyncLogRolling/a84351998556cbd83db95e9b5415ada0/.tmp/info/470644ae8d3e46f199ee1f11e8b3580e as hdfs://localhost:36657/user/jenkins/test-data/90fad67b-44a6-07b6-9c5c-bf568afb6a9a/data/default/TestLogRolling-testSlowSyncLogRolling/a84351998556cbd83db95e9b5415ada0/info/470644ae8d3e46f199ee1f11e8b3580e 2024-11-13T00:28:19,304 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36657/user/jenkins/test-data/90fad67b-44a6-07b6-9c5c-bf568afb6a9a/data/default/TestLogRolling-testSlowSyncLogRolling/a84351998556cbd83db95e9b5415ada0/info/470644ae8d3e46f199ee1f11e8b3580e, entries=7, sequenceid=21, filesize=12.2 K 2024-11-13T00:28:19,506 INFO [FSHLog-0-hdfs://localhost:36657/user/jenkins/test-data/90fad67b-44a6-07b6-9c5c-bf568afb6a9a-prefix:cb28580903fb,35769,1731457659784 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:36357,DS-eb6af8ad-fdb4-43dd-9bce-4e752d43af71,DISK], DatanodeInfoWithStorage[127.0.0.1:34907,DS-0fba3322-3b05-4261-a75d-fbd4306bb1bf,DISK]] 2024-11-13T00:28:19,506 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for a84351998556cbd83db95e9b5415ada0 in 
437ms, sequenceid=21, compaction requested=false 2024-11-13T00:28:19,506 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for a84351998556cbd83db95e9b5415ada0: 2024-11-13T00:28:19,506 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=24.4 K, sizeToCheck=16.0 K 2024-11-13T00:28:19,506 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-13T00:28:19,507 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:36657/user/jenkins/test-data/90fad67b-44a6-07b6-9c5c-bf568afb6a9a/data/default/TestLogRolling-testSlowSyncLogRolling/a84351998556cbd83db95e9b5415ada0/info/4b1e38d582104bf7b8974bc127ae34e7 because midkey is the same as first or last row 2024-11-13T00:28:21,272 INFO [FSHLog-0-hdfs://localhost:36657/user/jenkins/test-data/90fad67b-44a6-07b6-9c5c-bf568afb6a9a-prefix:cb28580903fb,35769,1731457659784 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:36357,DS-eb6af8ad-fdb4-43dd-9bce-4e752d43af71,DISK], DatanodeInfoWithStorage[127.0.0.1:34907,DS-0fba3322-3b05-4261-a75d-fbd4306bb1bf,DISK]] 2024-11-13T00:28:21,780 INFO [master/cb28580903fb:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-13T00:28:21,780 INFO [master/cb28580903fb:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-11-13T00:28:23,475 INFO [FSHLog-0-hdfs://localhost:36657/user/jenkins/test-data/90fad67b-44a6-07b6-9c5c-bf568afb6a9a-prefix:cb28580903fb,35769,1731457659784 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:36357,DS-eb6af8ad-fdb4-43dd-9bce-4e752d43af71,DISK], DatanodeInfoWithStorage[127.0.0.1:34907,DS-0fba3322-3b05-4261-a75d-fbd4306bb1bf,DISK]] 2024-11-13T00:28:23,477 WARN [FSHLog-0-hdfs://localhost:36657/user/jenkins/test-data/90fad67b-44a6-07b6-9c5c-bf568afb6a9a-prefix:cb28580903fb,35769,1731457659784 {}] wal.AbstractFSWAL(2201): Requesting log roll because we exceeded slow sync threshold; count=8, threshold=5, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:36357,DS-eb6af8ad-fdb4-43dd-9bce-4e752d43af71,DISK], DatanodeInfoWithStorage[127.0.0.1:34907,DS-0fba3322-3b05-4261-a75d-fbd4306bb1bf,DISK]] 2024-11-13T00:28:23,478 DEBUG [regionserver/cb28580903fb:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog cb28580903fb%2C35769%2C1731457659784:(num 1731457692041) roll requested 2024-11-13T00:28:23,479 INFO [regionserver/cb28580903fb:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor cb28580903fb%2C35769%2C1731457659784.1731457703479 2024-11-13T00:28:23,690 INFO [regionserver/cb28580903fb:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 208 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:36357,DS-eb6af8ad-fdb4-43dd-9bce-4e752d43af71,DISK], DatanodeInfoWithStorage[127.0.0.1:34907,DS-0fba3322-3b05-4261-a75d-fbd4306bb1bf,DISK]] 2024-11-13T00:28:23,690 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:28:23,690 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:28:23,691 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:28:23,691 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:28:23,691 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 
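The WARN above shows the count-based roll trigger: eight syncs exceeded the slow-sync latency threshold, more than the configured limit of five, so the log roller was asked to roll the WAL; a little further on, a single sync above the 5000 ms threshold triggers the same request on its own. A roll can also be requested explicitly through the Admin API, as in the hedged sketch below; locating the hosting server via the empty start row is an illustrative choice, not something the test does.

    import org.apache.hadoop.hbase.HConstants;
    import org.apache.hadoop.hbase.ServerName;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.RegionLocator;

    public final class RollWalSketch {
        // Rolls the WAL of the region server hosting the table's single region,
        // the same operation the log roller performs once the slow-sync thresholds trip.
        static void rollWal(Connection conn) throws Exception {
            TableName name = TableName.valueOf("TestLogRolling-testSlowSyncLogRolling");
            try (Admin admin = conn.getAdmin();
                 RegionLocator locator = conn.getRegionLocator(name)) {
                ServerName server =
                    locator.getRegionLocation(HConstants.EMPTY_START_ROW).getServerName();
                admin.rollWALWriter(server);
            }
        }
    }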
2024-11-13T00:28:23,691 INFO [regionserver/cb28580903fb:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/90fad67b-44a6-07b6-9c5c-bf568afb6a9a/WALs/cb28580903fb,35769,1731457659784/cb28580903fb%2C35769%2C1731457659784.1731457692041 with entries=8, filesize=7.55 KB; new WAL /user/jenkins/test-data/90fad67b-44a6-07b6-9c5c-bf568afb6a9a/WALs/cb28580903fb,35769,1731457659784/cb28580903fb%2C35769%2C1731457659784.1731457703479 2024-11-13T00:28:23,692 DEBUG [regionserver/cb28580903fb:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39731:39731),(127.0.0.1/127.0.0.1:46287:46287)] 2024-11-13T00:28:23,692 DEBUG [regionserver/cb28580903fb:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:36657/user/jenkins/test-data/90fad67b-44a6-07b6-9c5c-bf568afb6a9a/WALs/cb28580903fb,35769,1731457659784/cb28580903fb%2C35769%2C1731457659784.1731457692041 is not closed yet, will try archiving it next time 2024-11-13T00:28:23,693 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:36657/user/jenkins/test-data/90fad67b-44a6-07b6-9c5c-bf568afb6a9a/WALs/cb28580903fb,35769,1731457659784/cb28580903fb%2C35769%2C1731457659784.1731457671976 to hdfs://localhost:36657/user/jenkins/test-data/90fad67b-44a6-07b6-9c5c-bf568afb6a9a/oldWALs/cb28580903fb%2C35769%2C1731457659784.1731457671976 2024-11-13T00:28:23,694 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34907 is added to blk_1073741839_1015 (size=7739) 2024-11-13T00:28:23,695 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36357 is added to blk_1073741839_1015 (size=7739) 2024-11-13T00:28:25,679 INFO [FSHLog-0-hdfs://localhost:36657/user/jenkins/test-data/90fad67b-44a6-07b6-9c5c-bf568afb6a9a-prefix:cb28580903fb,35769,1731457659784 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:34907,DS-0fba3322-3b05-4261-a75d-fbd4306bb1bf,DISK], DatanodeInfoWithStorage[127.0.0.1:36357,DS-eb6af8ad-fdb4-43dd-9bce-4e752d43af71,DISK]] 2024-11-13T00:28:27,328 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region a84351998556cbd83db95e9b5415ada0, had cached 0 bytes from a total of 25018 2024-11-13T00:28:27,884 INFO [FSHLog-0-hdfs://localhost:36657/user/jenkins/test-data/90fad67b-44a6-07b6-9c5c-bf568afb6a9a-prefix:cb28580903fb,35769,1731457659784 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:34907,DS-0fba3322-3b05-4261-a75d-fbd4306bb1bf,DISK], DatanodeInfoWithStorage[127.0.0.1:36357,DS-eb6af8ad-fdb4-43dd-9bce-4e752d43af71,DISK]] 2024-11-13T00:28:30,088 INFO [FSHLog-0-hdfs://localhost:36657/user/jenkins/test-data/90fad67b-44a6-07b6-9c5c-bf568afb6a9a-prefix:cb28580903fb,35769,1731457659784 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:34907,DS-0fba3322-3b05-4261-a75d-fbd4306bb1bf,DISK], DatanodeInfoWithStorage[127.0.0.1:36357,DS-eb6af8ad-fdb4-43dd-9bce-4e752d43af71,DISK]] 2024-11-13T00:28:32,292 INFO [FSHLog-0-hdfs://localhost:36657/user/jenkins/test-data/90fad67b-44a6-07b6-9c5c-bf568afb6a9a-prefix:cb28580903fb,35769,1731457659784 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:34907,DS-0fba3322-3b05-4261-a75d-fbd4306bb1bf,DISK], 
DatanodeInfoWithStorage[127.0.0.1:36357,DS-eb6af8ad-fdb4-43dd-9bce-4e752d43af71,DISK]] 2024-11-13T00:28:34,294 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-11-13T00:28:34,294 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor cb28580903fb%2C35769%2C1731457659784.1731457714294 2024-11-13T00:28:38,227 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-13T00:28:39,306 INFO [Time-limited test {}] wal.AbstractFSWAL(1368): Slow sync cost: 5008 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:34907,DS-0fba3322-3b05-4261-a75d-fbd4306bb1bf,DISK], DatanodeInfoWithStorage[127.0.0.1:36357,DS-eb6af8ad-fdb4-43dd-9bce-4e752d43af71,DISK]] 2024-11-13T00:28:39,308 WARN [Time-limited test {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5008 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:34907,DS-0fba3322-3b05-4261-a75d-fbd4306bb1bf,DISK], DatanodeInfoWithStorage[127.0.0.1:36357,DS-eb6af8ad-fdb4-43dd-9bce-4e752d43af71,DISK]] 2024-11-13T00:28:39,309 DEBUG [regionserver/cb28580903fb:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog cb28580903fb%2C35769%2C1731457659784:(num 1731457714294) roll requested 2024-11-13T00:28:39,309 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:28:39,309 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:28:39,309 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:28:39,309 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:28:39,309 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:28:39,310 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/90fad67b-44a6-07b6-9c5c-bf568afb6a9a/WALs/cb28580903fb,35769,1731457659784/cb28580903fb%2C35769%2C1731457659784.1731457703479 with entries=4, filesize=4.63 KB; new WAL /user/jenkins/test-data/90fad67b-44a6-07b6-9c5c-bf568afb6a9a/WALs/cb28580903fb,35769,1731457659784/cb28580903fb%2C35769%2C1731457659784.1731457714294 2024-11-13T00:28:39,312 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36357 is added to blk_1073741841_1017 (size=4753) 2024-11-13T00:28:39,312 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34907 is added to blk_1073741841_1017 (size=4753) 2024-11-13T00:28:39,317 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39731:39731),(127.0.0.1/127.0.0.1:46287:46287)] 2024-11-13T00:28:39,317 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:36657/user/jenkins/test-data/90fad67b-44a6-07b6-9c5c-bf568afb6a9a/WALs/cb28580903fb,35769,1731457659784/cb28580903fb%2C35769%2C1731457659784.1731457703479 is not closed yet, will try archiving it next time 2024-11-13T00:28:39,317 INFO [regionserver/cb28580903fb:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor cb28580903fb%2C35769%2C1731457659784.1731457719317 2024-11-13T00:28:44,320 INFO [FSHLog-0-hdfs://localhost:36657/user/jenkins/test-data/90fad67b-44a6-07b6-9c5c-bf568afb6a9a-prefix:cb28580903fb,35769,1731457659784 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5001 ms, current pipeline: 
[DatanodeInfoWithStorage[127.0.0.1:34907,DS-0fba3322-3b05-4261-a75d-fbd4306bb1bf,DISK], DatanodeInfoWithStorage[127.0.0.1:36357,DS-eb6af8ad-fdb4-43dd-9bce-4e752d43af71,DISK]] 2024-11-13T00:28:44,320 WARN [FSHLog-0-hdfs://localhost:36657/user/jenkins/test-data/90fad67b-44a6-07b6-9c5c-bf568afb6a9a-prefix:cb28580903fb,35769,1731457659784 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5001 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:34907,DS-0fba3322-3b05-4261-a75d-fbd4306bb1bf,DISK], DatanodeInfoWithStorage[127.0.0.1:36357,DS-eb6af8ad-fdb4-43dd-9bce-4e752d43af71,DISK]] 2024-11-13T00:28:44,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35769 {}] regionserver.HRegion(8855): Flush requested on a84351998556cbd83db95e9b5415ada0 2024-11-13T00:28:44,320 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing a84351998556cbd83db95e9b5415ada0 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-13T00:28:44,326 INFO [regionserver/cb28580903fb:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 5005 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:34907,DS-0fba3322-3b05-4261-a75d-fbd4306bb1bf,DISK], DatanodeInfoWithStorage[127.0.0.1:36357,DS-eb6af8ad-fdb4-43dd-9bce-4e752d43af71,DISK]] 2024-11-13T00:28:44,326 WARN [regionserver/cb28580903fb:0.logRoller {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5005 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:34907,DS-0fba3322-3b05-4261-a75d-fbd4306bb1bf,DISK], DatanodeInfoWithStorage[127.0.0.1:36357,DS-eb6af8ad-fdb4-43dd-9bce-4e752d43af71,DISK]] 2024-11-13T00:28:46,321 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-11-13T00:28:49,322 INFO [FSHLog-0-hdfs://localhost:36657/user/jenkins/test-data/90fad67b-44a6-07b6-9c5c-bf568afb6a9a-prefix:cb28580903fb,35769,1731457659784 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5001 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:34907,DS-0fba3322-3b05-4261-a75d-fbd4306bb1bf,DISK], DatanodeInfoWithStorage[127.0.0.1:36357,DS-eb6af8ad-fdb4-43dd-9bce-4e752d43af71,DISK]] 2024-11-13T00:28:49,323 WARN [FSHLog-0-hdfs://localhost:36657/user/jenkins/test-data/90fad67b-44a6-07b6-9c5c-bf568afb6a9a-prefix:cb28580903fb,35769,1731457659784 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5001 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:34907,DS-0fba3322-3b05-4261-a75d-fbd4306bb1bf,DISK], DatanodeInfoWithStorage[127.0.0.1:36357,DS-eb6af8ad-fdb4-43dd-9bce-4e752d43af71,DISK]] 2024-11-13T00:28:49,323 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:28:49,323 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:28:49,323 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:28:49,323 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:28:49,324 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:28:49,324 INFO [regionserver/cb28580903fb:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/90fad67b-44a6-07b6-9c5c-bf568afb6a9a/WALs/cb28580903fb,35769,1731457659784/cb28580903fb%2C35769%2C1731457659784.1731457714294 with entries=2, filesize=1.52 KB; new WAL 
/user/jenkins/test-data/90fad67b-44a6-07b6-9c5c-bf568afb6a9a/WALs/cb28580903fb,35769,1731457659784/cb28580903fb%2C35769%2C1731457659784.1731457719317 2024-11-13T00:28:49,325 DEBUG [regionserver/cb28580903fb:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39731:39731),(127.0.0.1/127.0.0.1:46287:46287)] 2024-11-13T00:28:49,325 DEBUG [regionserver/cb28580903fb:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:36657/user/jenkins/test-data/90fad67b-44a6-07b6-9c5c-bf568afb6a9a/WALs/cb28580903fb,35769,1731457659784/cb28580903fb%2C35769%2C1731457659784.1731457714294 is not closed yet, will try archiving it next time 2024-11-13T00:28:49,325 DEBUG [regionserver/cb28580903fb:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog cb28580903fb%2C35769%2C1731457659784:(num 1731457719317) roll requested 2024-11-13T00:28:49,326 INFO [regionserver/cb28580903fb:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor cb28580903fb%2C35769%2C1731457659784.1731457729325 2024-11-13T00:28:49,327 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36357 is added to blk_1073741842_1018 (size=1569) 2024-11-13T00:28:49,327 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34907 is added to blk_1073741842_1018 (size=1569) 2024-11-13T00:28:49,328 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36657/user/jenkins/test-data/90fad67b-44a6-07b6-9c5c-bf568afb6a9a/data/default/TestLogRolling-testSlowSyncLogRolling/a84351998556cbd83db95e9b5415ada0/.tmp/info/40f328a35e8d425b8f1fe2c305c6a647 is 1080, key is row0015/info:/1731457701070/Put/seqid=0 2024-11-13T00:28:49,336 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34907 is added to blk_1073741844_1020 (size=12509) 2024-11-13T00:28:49,336 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36357 is added to blk_1073741844_1020 (size=12509) 2024-11-13T00:28:49,337 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=31 (bloomFilter=true), to=hdfs://localhost:36657/user/jenkins/test-data/90fad67b-44a6-07b6-9c5c-bf568afb6a9a/data/default/TestLogRolling-testSlowSyncLogRolling/a84351998556cbd83db95e9b5415ada0/.tmp/info/40f328a35e8d425b8f1fe2c305c6a647 2024-11-13T00:28:49,350 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36657/user/jenkins/test-data/90fad67b-44a6-07b6-9c5c-bf568afb6a9a/data/default/TestLogRolling-testSlowSyncLogRolling/a84351998556cbd83db95e9b5415ada0/.tmp/info/40f328a35e8d425b8f1fe2c305c6a647 as hdfs://localhost:36657/user/jenkins/test-data/90fad67b-44a6-07b6-9c5c-bf568afb6a9a/data/default/TestLogRolling-testSlowSyncLogRolling/a84351998556cbd83db95e9b5415ada0/info/40f328a35e8d425b8f1fe2c305c6a647 2024-11-13T00:28:49,361 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36657/user/jenkins/test-data/90fad67b-44a6-07b6-9c5c-bf568afb6a9a/data/default/TestLogRolling-testSlowSyncLogRolling/a84351998556cbd83db95e9b5415ada0/info/40f328a35e8d425b8f1fe2c305c6a647, entries=7, sequenceid=31, filesize=12.2 K 2024-11-13T00:28:54,335 INFO [regionserver/cb28580903fb:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 5006 ms, current pipeline: 
[DatanodeInfoWithStorage[127.0.0.1:34907,DS-0fba3322-3b05-4261-a75d-fbd4306bb1bf,DISK], DatanodeInfoWithStorage[127.0.0.1:36357,DS-eb6af8ad-fdb4-43dd-9bce-4e752d43af71,DISK]] 2024-11-13T00:28:54,335 WARN [regionserver/cb28580903fb:0.logRoller {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5006 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:34907,DS-0fba3322-3b05-4261-a75d-fbd4306bb1bf,DISK], DatanodeInfoWithStorage[127.0.0.1:36357,DS-eb6af8ad-fdb4-43dd-9bce-4e752d43af71,DISK]] 2024-11-13T00:28:54,363 INFO [FSHLog-0-hdfs://localhost:36657/user/jenkins/test-data/90fad67b-44a6-07b6-9c5c-bf568afb6a9a-prefix:cb28580903fb,35769,1731457659784 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5001 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:34907,DS-0fba3322-3b05-4261-a75d-fbd4306bb1bf,DISK], DatanodeInfoWithStorage[127.0.0.1:36357,DS-eb6af8ad-fdb4-43dd-9bce-4e752d43af71,DISK]] 2024-11-13T00:28:54,363 WARN [FSHLog-0-hdfs://localhost:36657/user/jenkins/test-data/90fad67b-44a6-07b6-9c5c-bf568afb6a9a-prefix:cb28580903fb,35769,1731457659784 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5001 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:34907,DS-0fba3322-3b05-4261-a75d-fbd4306bb1bf,DISK], DatanodeInfoWithStorage[127.0.0.1:36357,DS-eb6af8ad-fdb4-43dd-9bce-4e752d43af71,DISK]] 2024-11-13T00:28:54,363 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for a84351998556cbd83db95e9b5415ada0 in 10043ms, sequenceid=31, compaction requested=true 2024-11-13T00:28:54,363 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for a84351998556cbd83db95e9b5415ada0: 2024-11-13T00:28:54,363 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:28:54,363 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:28:54,364 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=36.6 K, sizeToCheck=16.0 K 2024-11-13T00:28:54,364 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-13T00:28:54,364 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:28:54,364 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:36657/user/jenkins/test-data/90fad67b-44a6-07b6-9c5c-bf568afb6a9a/data/default/TestLogRolling-testSlowSyncLogRolling/a84351998556cbd83db95e9b5415ada0/info/4b1e38d582104bf7b8974bc127ae34e7 because midkey is the same as first or last row 2024-11-13T00:28:54,364 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:28:54,364 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:28:54,364 INFO [regionserver/cb28580903fb:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/90fad67b-44a6-07b6-9c5c-bf568afb6a9a/WALs/cb28580903fb,35769,1731457659784/cb28580903fb%2C35769%2C1731457659784.1731457719317 with entries=1, filesize=430 B; new WAL /user/jenkins/test-data/90fad67b-44a6-07b6-9c5c-bf568afb6a9a/WALs/cb28580903fb,35769,1731457659784/cb28580903fb%2C35769%2C1731457659784.1731457729325 2024-11-13T00:28:54,365 DEBUG [regionserver/cb28580903fb:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: 
[(127.0.0.1/127.0.0.1:39731:39731),(127.0.0.1/127.0.0.1:46287:46287)] 2024-11-13T00:28:54,365 DEBUG [regionserver/cb28580903fb:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:36657/user/jenkins/test-data/90fad67b-44a6-07b6-9c5c-bf568afb6a9a/WALs/cb28580903fb,35769,1731457659784/cb28580903fb%2C35769%2C1731457659784.1731457719317 is not closed yet, will try archiving it next time 2024-11-13T00:28:54,365 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:36657/user/jenkins/test-data/90fad67b-44a6-07b6-9c5c-bf568afb6a9a/WALs/cb28580903fb,35769,1731457659784/cb28580903fb%2C35769%2C1731457659784.1731457692041 to hdfs://localhost:36657/user/jenkins/test-data/90fad67b-44a6-07b6-9c5c-bf568afb6a9a/oldWALs/cb28580903fb%2C35769%2C1731457659784.1731457692041 2024-11-13T00:28:54,365 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a84351998556cbd83db95e9b5415ada0:info, priority=-2147483648, current under compaction store size is 1 2024-11-13T00:28:54,365 DEBUG [regionserver/cb28580903fb:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog cb28580903fb%2C35769%2C1731457659784:(num 1731457734365) roll requested 2024-11-13T00:28:54,366 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor cb28580903fb%2C35769%2C1731457659784.1731457734365 2024-11-13T00:28:54,367 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36357 is added to blk_1073741843_1019 (size=438) 2024-11-13T00:28:54,367 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34907 is added to blk_1073741843_1019 (size=438) 2024-11-13T00:28:54,368 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-13T00:28:54,369 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:36657/user/jenkins/test-data/90fad67b-44a6-07b6-9c5c-bf568afb6a9a/WALs/cb28580903fb,35769,1731457659784/cb28580903fb%2C35769%2C1731457659784.1731457703479 to hdfs://localhost:36657/user/jenkins/test-data/90fad67b-44a6-07b6-9c5c-bf568afb6a9a/oldWALs/cb28580903fb%2C35769%2C1731457659784.1731457703479 2024-11-13T00:28:54,369 DEBUG [RS:0;cb28580903fb:35769-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-13T00:28:54,370 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:36657/user/jenkins/test-data/90fad67b-44a6-07b6-9c5c-bf568afb6a9a/WALs/cb28580903fb,35769,1731457659784/cb28580903fb%2C35769%2C1731457659784.1731457714294 to hdfs://localhost:36657/user/jenkins/test-data/90fad67b-44a6-07b6-9c5c-bf568afb6a9a/oldWALs/cb28580903fb%2C35769%2C1731457659784.1731457714294 2024-11-13T00:28:54,372 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:36657/user/jenkins/test-data/90fad67b-44a6-07b6-9c5c-bf568afb6a9a/WALs/cb28580903fb,35769,1731457659784/cb28580903fb%2C35769%2C1731457659784.1731457719317 to hdfs://localhost:36657/user/jenkins/test-data/90fad67b-44a6-07b6-9c5c-bf568afb6a9a/oldWALs/cb28580903fb%2C35769%2C1731457659784.1731457719317 2024-11-13T00:28:54,372 DEBUG [RS:0;cb28580903fb:35769-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37527 starting at candidate #0 after considering 1 permutations 
with 1 in ratio 2024-11-13T00:28:54,374 DEBUG [RS:0;cb28580903fb:35769-shortCompactions-0 {}] regionserver.HStore(1541): a84351998556cbd83db95e9b5415ada0/info is initiating minor compaction (all files) 2024-11-13T00:28:54,374 INFO [RS:0;cb28580903fb:35769-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of a84351998556cbd83db95e9b5415ada0/info in TestLogRolling-testSlowSyncLogRolling,,1731457661899.a84351998556cbd83db95e9b5415ada0. 2024-11-13T00:28:54,375 INFO [RS:0;cb28580903fb:35769-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36657/user/jenkins/test-data/90fad67b-44a6-07b6-9c5c-bf568afb6a9a/data/default/TestLogRolling-testSlowSyncLogRolling/a84351998556cbd83db95e9b5415ada0/info/4b1e38d582104bf7b8974bc127ae34e7, hdfs://localhost:36657/user/jenkins/test-data/90fad67b-44a6-07b6-9c5c-bf568afb6a9a/data/default/TestLogRolling-testSlowSyncLogRolling/a84351998556cbd83db95e9b5415ada0/info/470644ae8d3e46f199ee1f11e8b3580e, hdfs://localhost:36657/user/jenkins/test-data/90fad67b-44a6-07b6-9c5c-bf568afb6a9a/data/default/TestLogRolling-testSlowSyncLogRolling/a84351998556cbd83db95e9b5415ada0/info/40f328a35e8d425b8f1fe2c305c6a647] into tmpdir=hdfs://localhost:36657/user/jenkins/test-data/90fad67b-44a6-07b6-9c5c-bf568afb6a9a/data/default/TestLogRolling-testSlowSyncLogRolling/a84351998556cbd83db95e9b5415ada0/.tmp, totalSize=36.6 K 2024-11-13T00:28:54,376 DEBUG [RS:0;cb28580903fb:35769-shortCompactions-0 {}] compactions.Compactor(225): Compacting 4b1e38d582104bf7b8974bc127ae34e7, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1731457671999 2024-11-13T00:28:54,377 DEBUG [RS:0;cb28580903fb:35769-shortCompactions-0 {}] compactions.Compactor(225): Compacting 470644ae8d3e46f199ee1f11e8b3580e, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=21, earliestPutTs=1731457686030 2024-11-13T00:28:54,378 DEBUG [RS:0;cb28580903fb:35769-shortCompactions-0 {}] compactions.Compactor(225): Compacting 40f328a35e8d425b8f1fe2c305c6a647, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=31, earliestPutTs=1731457701070 2024-11-13T00:28:54,382 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:28:54,382 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:28:54,383 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:28:54,383 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:28:54,383 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:28:54,383 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/90fad67b-44a6-07b6-9c5c-bf568afb6a9a/WALs/cb28580903fb,35769,1731457659784/cb28580903fb%2C35769%2C1731457659784.1731457729325 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/90fad67b-44a6-07b6-9c5c-bf568afb6a9a/WALs/cb28580903fb,35769,1731457659784/cb28580903fb%2C35769%2C1731457659784.1731457734365 2024-11-13T00:28:54,384 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39731:39731),(127.0.0.1/127.0.0.1:46287:46287)] 2024-11-13T00:28:54,384 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:36657/user/jenkins/test-data/90fad67b-44a6-07b6-9c5c-bf568afb6a9a/WALs/cb28580903fb,35769,1731457659784/cb28580903fb%2C35769%2C1731457659784.1731457729325 is not closed yet, will try archiving it next time 2024-11-13T00:28:54,385 INFO 
[regionserver/cb28580903fb:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor cb28580903fb%2C35769%2C1731457659784.1731457734384 2024-11-13T00:28:54,386 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36357 is added to blk_1073741845_1021 (size=93) 2024-11-13T00:28:54,386 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34907 is added to blk_1073741845_1021 (size=93) 2024-11-13T00:28:54,387 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:36657/user/jenkins/test-data/90fad67b-44a6-07b6-9c5c-bf568afb6a9a/WALs/cb28580903fb,35769,1731457659784/cb28580903fb%2C35769%2C1731457659784.1731457729325 to hdfs://localhost:36657/user/jenkins/test-data/90fad67b-44a6-07b6-9c5c-bf568afb6a9a/oldWALs/cb28580903fb%2C35769%2C1731457659784.1731457729325 2024-11-13T00:28:54,392 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:28:54,392 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:28:54,392 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:28:54,392 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:28:54,392 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:28:54,392 INFO [regionserver/cb28580903fb:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/90fad67b-44a6-07b6-9c5c-bf568afb6a9a/WALs/cb28580903fb,35769,1731457659784/cb28580903fb%2C35769%2C1731457659784.1731457734365 with entries=1, filesize=1.22 KB; new WAL /user/jenkins/test-data/90fad67b-44a6-07b6-9c5c-bf568afb6a9a/WALs/cb28580903fb,35769,1731457659784/cb28580903fb%2C35769%2C1731457659784.1731457734384 2024-11-13T00:28:54,394 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36357 is added to blk_1073741846_1022 (size=1258) 2024-11-13T00:28:54,395 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34907 is added to blk_1073741846_1022 (size=1258) 2024-11-13T00:28:54,401 DEBUG [regionserver/cb28580903fb:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39731:39731),(127.0.0.1/127.0.0.1:46287:46287)] 2024-11-13T00:28:54,418 INFO [RS:0;cb28580903fb:35769-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): a84351998556cbd83db95e9b5415ada0#info#compaction#3 average throughput is 10.77 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-13T00:28:54,420 DEBUG [RS:0;cb28580903fb:35769-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36657/user/jenkins/test-data/90fad67b-44a6-07b6-9c5c-bf568afb6a9a/data/default/TestLogRolling-testSlowSyncLogRolling/a84351998556cbd83db95e9b5415ada0/.tmp/info/4bffcb7795a24b8baed15f04b535989b is 1080, key is row0001/info:/1731457671999/Put/seqid=0 2024-11-13T00:28:54,427 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34907 is added to blk_1073741848_1024 (size=27710) 2024-11-13T00:28:54,427 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36357 is added to blk_1073741848_1024 (size=27710) 2024-11-13T00:28:54,439 DEBUG [RS:0;cb28580903fb:35769-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36657/user/jenkins/test-data/90fad67b-44a6-07b6-9c5c-bf568afb6a9a/data/default/TestLogRolling-testSlowSyncLogRolling/a84351998556cbd83db95e9b5415ada0/.tmp/info/4bffcb7795a24b8baed15f04b535989b as hdfs://localhost:36657/user/jenkins/test-data/90fad67b-44a6-07b6-9c5c-bf568afb6a9a/data/default/TestLogRolling-testSlowSyncLogRolling/a84351998556cbd83db95e9b5415ada0/info/4bffcb7795a24b8baed15f04b535989b 2024-11-13T00:28:54,458 INFO [RS:0;cb28580903fb:35769-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in a84351998556cbd83db95e9b5415ada0/info of a84351998556cbd83db95e9b5415ada0 into 4bffcb7795a24b8baed15f04b535989b(size=27.1 K), total size for store is 27.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-13T00:28:54,458 DEBUG [RS:0;cb28580903fb:35769-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for a84351998556cbd83db95e9b5415ada0: 2024-11-13T00:28:54,460 INFO [RS:0;cb28580903fb:35769-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testSlowSyncLogRolling,,1731457661899.a84351998556cbd83db95e9b5415ada0., storeName=a84351998556cbd83db95e9b5415ada0/info, priority=13, startTime=1731457734365; duration=0sec 2024-11-13T00:28:54,460 DEBUG [RS:0;cb28580903fb:35769-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-11-13T00:28:54,460 DEBUG [RS:0;cb28580903fb:35769-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-13T00:28:54,460 DEBUG [RS:0;cb28580903fb:35769-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:36657/user/jenkins/test-data/90fad67b-44a6-07b6-9c5c-bf568afb6a9a/data/default/TestLogRolling-testSlowSyncLogRolling/a84351998556cbd83db95e9b5415ada0/info/4bffcb7795a24b8baed15f04b535989b because midkey is the same as first or last row 2024-11-13T00:28:54,460 DEBUG [RS:0;cb28580903fb:35769-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-11-13T00:28:54,460 DEBUG [RS:0;cb28580903fb:35769-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-13T00:28:54,461 DEBUG [RS:0;cb28580903fb:35769-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split 
hdfs://localhost:36657/user/jenkins/test-data/90fad67b-44a6-07b6-9c5c-bf568afb6a9a/data/default/TestLogRolling-testSlowSyncLogRolling/a84351998556cbd83db95e9b5415ada0/info/4bffcb7795a24b8baed15f04b535989b because midkey is the same as first or last row 2024-11-13T00:28:54,461 DEBUG [RS:0;cb28580903fb:35769-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-11-13T00:28:54,461 DEBUG [RS:0;cb28580903fb:35769-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-13T00:28:54,461 DEBUG [RS:0;cb28580903fb:35769-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:36657/user/jenkins/test-data/90fad67b-44a6-07b6-9c5c-bf568afb6a9a/data/default/TestLogRolling-testSlowSyncLogRolling/a84351998556cbd83db95e9b5415ada0/info/4bffcb7795a24b8baed15f04b535989b because midkey is the same as first or last row 2024-11-13T00:28:54,461 DEBUG [RS:0;cb28580903fb:35769-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-13T00:28:54,461 DEBUG [RS:0;cb28580903fb:35769-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a84351998556cbd83db95e9b5415ada0:info 2024-11-13T00:29:06,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35769 {}] regionserver.HRegion(8855): Flush requested on a84351998556cbd83db95e9b5415ada0 2024-11-13T00:29:06,410 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing a84351998556cbd83db95e9b5415ada0 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-13T00:29:06,417 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36657/user/jenkins/test-data/90fad67b-44a6-07b6-9c5c-bf568afb6a9a/data/default/TestLogRolling-testSlowSyncLogRolling/a84351998556cbd83db95e9b5415ada0/.tmp/info/3316b0d13e62494c92276444a1809874 is 1080, key is row0022/info:/1731457734386/Put/seqid=0 2024-11-13T00:29:06,426 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34907 is added to blk_1073741849_1025 (size=12509) 2024-11-13T00:29:06,426 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36357 is added to blk_1073741849_1025 (size=12509) 2024-11-13T00:29:06,826 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=42 (bloomFilter=true), to=hdfs://localhost:36657/user/jenkins/test-data/90fad67b-44a6-07b6-9c5c-bf568afb6a9a/data/default/TestLogRolling-testSlowSyncLogRolling/a84351998556cbd83db95e9b5415ada0/.tmp/info/3316b0d13e62494c92276444a1809874 2024-11-13T00:29:06,837 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36657/user/jenkins/test-data/90fad67b-44a6-07b6-9c5c-bf568afb6a9a/data/default/TestLogRolling-testSlowSyncLogRolling/a84351998556cbd83db95e9b5415ada0/.tmp/info/3316b0d13e62494c92276444a1809874 as hdfs://localhost:36657/user/jenkins/test-data/90fad67b-44a6-07b6-9c5c-bf568afb6a9a/data/default/TestLogRolling-testSlowSyncLogRolling/a84351998556cbd83db95e9b5415ada0/info/3316b0d13e62494c92276444a1809874 2024-11-13T00:29:06,846 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:36657/user/jenkins/test-data/90fad67b-44a6-07b6-9c5c-bf568afb6a9a/data/default/TestLogRolling-testSlowSyncLogRolling/a84351998556cbd83db95e9b5415ada0/info/3316b0d13e62494c92276444a1809874, entries=7, sequenceid=42, filesize=12.2 K 2024-11-13T00:29:06,848 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for a84351998556cbd83db95e9b5415ada0 in 437ms, sequenceid=42, compaction requested=false 2024-11-13T00:29:06,848 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for a84351998556cbd83db95e9b5415ada0: 2024-11-13T00:29:06,848 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=39.3 K, sizeToCheck=16.0 K 2024-11-13T00:29:06,848 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-13T00:29:06,848 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:36657/user/jenkins/test-data/90fad67b-44a6-07b6-9c5c-bf568afb6a9a/data/default/TestLogRolling-testSlowSyncLogRolling/a84351998556cbd83db95e9b5415ada0/info/4bffcb7795a24b8baed15f04b535989b because midkey is the same as first or last row 2024-11-13T00:29:08,227 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-13T00:29:12,328 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region a84351998556cbd83db95e9b5415ada0, had cached 0 bytes from a total of 40219 2024-11-13T00:29:14,421 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-13T00:29:14,422 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
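Before the teardown that begins above, the earlier compaction is worth noting: ExploringCompactionPolicy selected all three 12.2 K flush files (37527 bytes in total) and the minor compaction rewrote them into a single 27.1 K store file, after which every split check declined because the midkey equals the first or last row. A compaction of this kind can also be requested from a client; the sketch below uses only public Admin calls, with the polling loop and class name added purely for illustration.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.CompactionState;
    import org.apache.hadoop.hbase.client.Connection;

    public final class CompactionSketch {
        // Requests a compaction of the test table and polls until the server reports it done.
        static void compactAndWait(Connection conn) throws Exception {
            TableName name = TableName.valueOf("TestLogRolling-testSlowSyncLogRolling");
            try (Admin admin = conn.getAdmin()) {
                admin.majorCompact(name); // admin.compact(name) would queue a minor compaction instead
                while (admin.getCompactionState(name) != CompactionState.NONE) {
                    Thread.sleep(200); // naive poll; a real test would bound this with a timeout
                }
            }
        }
    }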
2024-11-13T00:29:14,422 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-13T00:29:14,427 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T00:29:14,428 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T00:29:14,428 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
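The call stack above shows the shutdown originating in AbstractTestLogRolling.tearDown, which calls HBaseTestingUtil.shutdownMiniCluster. In JUnit terms that teardown looks roughly like the sketch below; the class and field names are placeholders rather than the test's actual layout.

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.junit.After;

    public class LogRollingTearDownSketch {
        // Placeholder field; the real test keeps its utility in the shared AbstractTestLogRolling base class.
        protected static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();

        @After
        public void tearDown() throws Exception {
            // Shuts down the minicluster (region server, master, ZooKeeper, HDFS) started earlier,
            // the call chain recorded in the stack trace above.
            TEST_UTIL.shutdownMiniCluster();
        }
    }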
2024-11-13T00:29:14,428 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-13T00:29:14,428 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=63992445, stopped=false 2024-11-13T00:29:14,428 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=cb28580903fb,35075,1731457659104 2024-11-13T00:29:14,430 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35769-0x1010eb918400001, quorum=127.0.0.1:64321, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-13T00:29:14,430 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35075-0x1010eb918400000, quorum=127.0.0.1:64321, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-13T00:29:14,430 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35075-0x1010eb918400000, quorum=127.0.0.1:64321, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T00:29:14,430 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35769-0x1010eb918400001, quorum=127.0.0.1:64321, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T00:29:14,430 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-13T00:29:14,431 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-13T00:29:14,431 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at 
org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-13T00:29:14,431 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T00:29:14,431 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:35075-0x1010eb918400000, quorum=127.0.0.1:64321, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-13T00:29:14,431 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:35769-0x1010eb918400001, quorum=127.0.0.1:64321, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-13T00:29:14,431 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'cb28580903fb,35769,1731457659784' ***** 2024-11-13T00:29:14,431 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-13T00:29:14,432 INFO [RS:0;cb28580903fb:35769 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-13T00:29:14,432 INFO [RS:0;cb28580903fb:35769 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-13T00:29:14,432 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-13T00:29:14,432 INFO [RS:0;cb28580903fb:35769 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-13T00:29:14,433 INFO [RS:0;cb28580903fb:35769 {}] regionserver.HRegionServer(3091): Received CLOSE for a84351998556cbd83db95e9b5415ada0 2024-11-13T00:29:14,433 INFO [RS:0;cb28580903fb:35769 {}] regionserver.HRegionServer(959): stopping server cb28580903fb,35769,1731457659784 2024-11-13T00:29:14,433 INFO [RS:0;cb28580903fb:35769 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-13T00:29:14,433 INFO [RS:0;cb28580903fb:35769 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;cb28580903fb:35769. 
2024-11-13T00:29:14,433 DEBUG [RS:0;cb28580903fb:35769 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-13T00:29:14,433 DEBUG [RS:0;cb28580903fb:35769 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T00:29:14,433 INFO [RS:0;cb28580903fb:35769 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-13T00:29:14,433 DEBUG [RS_CLOSE_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing a84351998556cbd83db95e9b5415ada0, disabling compactions & flushes 2024-11-13T00:29:14,434 INFO [RS:0;cb28580903fb:35769 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-13T00:29:14,434 INFO [RS_CLOSE_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testSlowSyncLogRolling,,1731457661899.a84351998556cbd83db95e9b5415ada0. 2024-11-13T00:29:14,434 INFO [RS:0;cb28580903fb:35769 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-13T00:29:14,434 DEBUG [RS_CLOSE_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testSlowSyncLogRolling,,1731457661899.a84351998556cbd83db95e9b5415ada0. 2024-11-13T00:29:14,434 INFO [RS:0;cb28580903fb:35769 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-13T00:29:14,434 DEBUG [RS_CLOSE_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testSlowSyncLogRolling,,1731457661899.a84351998556cbd83db95e9b5415ada0. after waiting 0 ms 2024-11-13T00:29:14,434 DEBUG [RS_CLOSE_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testSlowSyncLogRolling,,1731457661899.a84351998556cbd83db95e9b5415ada0. 
2024-11-13T00:29:14,434 INFO [RS_CLOSE_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing a84351998556cbd83db95e9b5415ada0 1/1 column families, dataSize=3.15 KB heapSize=3.63 KB 2024-11-13T00:29:14,434 INFO [RS:0;cb28580903fb:35769 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-13T00:29:14,434 DEBUG [RS:0;cb28580903fb:35769 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, a84351998556cbd83db95e9b5415ada0=TestLogRolling-testSlowSyncLogRolling,,1731457661899.a84351998556cbd83db95e9b5415ada0.} 2024-11-13T00:29:14,434 DEBUG [RS_CLOSE_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-13T00:29:14,434 INFO [RS_CLOSE_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-13T00:29:14,434 DEBUG [RS_CLOSE_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-13T00:29:14,434 DEBUG [RS:0;cb28580903fb:35769 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, a84351998556cbd83db95e9b5415ada0 2024-11-13T00:29:14,434 DEBUG [RS_CLOSE_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-13T00:29:14,435 DEBUG [RS_CLOSE_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-13T00:29:14,435 INFO [RS_CLOSE_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.65 KB heapSize=3.67 KB 2024-11-13T00:29:14,441 DEBUG [RS_CLOSE_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36657/user/jenkins/test-data/90fad67b-44a6-07b6-9c5c-bf568afb6a9a/data/default/TestLogRolling-testSlowSyncLogRolling/a84351998556cbd83db95e9b5415ada0/.tmp/info/53b2e9ac6a544c77ad2a10e373226beb is 1080, key is row0029/info:/1731457748412/Put/seqid=0 2024-11-13T00:29:14,447 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34907 is added to blk_1073741850_1026 (size=8193) 2024-11-13T00:29:14,448 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36357 is added to blk_1073741850_1026 (size=8193) 2024-11-13T00:29:14,448 INFO [RS_CLOSE_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.15 KB at sequenceid=48 (bloomFilter=true), to=hdfs://localhost:36657/user/jenkins/test-data/90fad67b-44a6-07b6-9c5c-bf568afb6a9a/data/default/TestLogRolling-testSlowSyncLogRolling/a84351998556cbd83db95e9b5415ada0/.tmp/info/53b2e9ac6a544c77ad2a10e373226beb 2024-11-13T00:29:14,458 DEBUG [RS_CLOSE_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36657/user/jenkins/test-data/90fad67b-44a6-07b6-9c5c-bf568afb6a9a/data/hbase/meta/1588230740/.tmp/info/fef37b7b1ec74c22861c9f733486719d is 195, key is 
TestLogRolling-testSlowSyncLogRolling,,1731457661899.a84351998556cbd83db95e9b5415ada0./info:regioninfo/1731457662351/Put/seqid=0 2024-11-13T00:29:14,459 DEBUG [RS_CLOSE_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36657/user/jenkins/test-data/90fad67b-44a6-07b6-9c5c-bf568afb6a9a/data/default/TestLogRolling-testSlowSyncLogRolling/a84351998556cbd83db95e9b5415ada0/.tmp/info/53b2e9ac6a544c77ad2a10e373226beb as hdfs://localhost:36657/user/jenkins/test-data/90fad67b-44a6-07b6-9c5c-bf568afb6a9a/data/default/TestLogRolling-testSlowSyncLogRolling/a84351998556cbd83db95e9b5415ada0/info/53b2e9ac6a544c77ad2a10e373226beb 2024-11-13T00:29:14,464 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36357 is added to blk_1073741851_1027 (size=7016) 2024-11-13T00:29:14,464 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34907 is added to blk_1073741851_1027 (size=7016) 2024-11-13T00:29:14,465 INFO [RS_CLOSE_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.45 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:36657/user/jenkins/test-data/90fad67b-44a6-07b6-9c5c-bf568afb6a9a/data/hbase/meta/1588230740/.tmp/info/fef37b7b1ec74c22861c9f733486719d 2024-11-13T00:29:14,468 INFO [RS_CLOSE_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36657/user/jenkins/test-data/90fad67b-44a6-07b6-9c5c-bf568afb6a9a/data/default/TestLogRolling-testSlowSyncLogRolling/a84351998556cbd83db95e9b5415ada0/info/53b2e9ac6a544c77ad2a10e373226beb, entries=3, sequenceid=48, filesize=8.0 K 2024-11-13T00:29:14,470 INFO [RS_CLOSE_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~3.15 KB/3228, heapSize ~3.61 KB/3696, currentSize=0 B/0 for a84351998556cbd83db95e9b5415ada0 in 35ms, sequenceid=48, compaction requested=true 2024-11-13T00:29:14,470 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731457661899.a84351998556cbd83db95e9b5415ada0.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:36657/user/jenkins/test-data/90fad67b-44a6-07b6-9c5c-bf568afb6a9a/data/default/TestLogRolling-testSlowSyncLogRolling/a84351998556cbd83db95e9b5415ada0/info/4b1e38d582104bf7b8974bc127ae34e7, hdfs://localhost:36657/user/jenkins/test-data/90fad67b-44a6-07b6-9c5c-bf568afb6a9a/data/default/TestLogRolling-testSlowSyncLogRolling/a84351998556cbd83db95e9b5415ada0/info/470644ae8d3e46f199ee1f11e8b3580e, hdfs://localhost:36657/user/jenkins/test-data/90fad67b-44a6-07b6-9c5c-bf568afb6a9a/data/default/TestLogRolling-testSlowSyncLogRolling/a84351998556cbd83db95e9b5415ada0/info/40f328a35e8d425b8f1fe2c305c6a647] to archive 2024-11-13T00:29:14,473 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731457661899.a84351998556cbd83db95e9b5415ada0.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-13T00:29:14,476 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731457661899.a84351998556cbd83db95e9b5415ada0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36657/user/jenkins/test-data/90fad67b-44a6-07b6-9c5c-bf568afb6a9a/data/default/TestLogRolling-testSlowSyncLogRolling/a84351998556cbd83db95e9b5415ada0/info/4b1e38d582104bf7b8974bc127ae34e7 to hdfs://localhost:36657/user/jenkins/test-data/90fad67b-44a6-07b6-9c5c-bf568afb6a9a/archive/data/default/TestLogRolling-testSlowSyncLogRolling/a84351998556cbd83db95e9b5415ada0/info/4b1e38d582104bf7b8974bc127ae34e7 2024-11-13T00:29:14,478 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731457661899.a84351998556cbd83db95e9b5415ada0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36657/user/jenkins/test-data/90fad67b-44a6-07b6-9c5c-bf568afb6a9a/data/default/TestLogRolling-testSlowSyncLogRolling/a84351998556cbd83db95e9b5415ada0/info/470644ae8d3e46f199ee1f11e8b3580e to hdfs://localhost:36657/user/jenkins/test-data/90fad67b-44a6-07b6-9c5c-bf568afb6a9a/archive/data/default/TestLogRolling-testSlowSyncLogRolling/a84351998556cbd83db95e9b5415ada0/info/470644ae8d3e46f199ee1f11e8b3580e 2024-11-13T00:29:14,480 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731457661899.a84351998556cbd83db95e9b5415ada0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36657/user/jenkins/test-data/90fad67b-44a6-07b6-9c5c-bf568afb6a9a/data/default/TestLogRolling-testSlowSyncLogRolling/a84351998556cbd83db95e9b5415ada0/info/40f328a35e8d425b8f1fe2c305c6a647 to hdfs://localhost:36657/user/jenkins/test-data/90fad67b-44a6-07b6-9c5c-bf568afb6a9a/archive/data/default/TestLogRolling-testSlowSyncLogRolling/a84351998556cbd83db95e9b5415ada0/info/40f328a35e8d425b8f1fe2c305c6a647 2024-11-13T00:29:14,491 DEBUG [RS_CLOSE_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36657/user/jenkins/test-data/90fad67b-44a6-07b6-9c5c-bf568afb6a9a/data/hbase/meta/1588230740/.tmp/ns/abe95fdc80a3411398968ae7f39d9b63 is 43, key is default/ns:d/1731457661640/Put/seqid=0 2024-11-13T00:29:14,497 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34907 is added to blk_1073741852_1028 (size=5153) 2024-11-13T00:29:14,492 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731457661899.a84351998556cbd83db95e9b5415ada0.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=cb28580903fb:35075 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] 
at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 
16 more 2024-11-13T00:29:14,497 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36357 is added to blk_1073741852_1028 (size=5153) 2024-11-13T00:29:14,498 WARN [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731457661899.a84351998556cbd83db95e9b5415ada0.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [4b1e38d582104bf7b8974bc127ae34e7=12509, 470644ae8d3e46f199ee1f11e8b3580e=12509, 40f328a35e8d425b8f1fe2c305c6a647=12509] 2024-11-13T00:29:14,498 INFO [RS_CLOSE_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:36657/user/jenkins/test-data/90fad67b-44a6-07b6-9c5c-bf568afb6a9a/data/hbase/meta/1588230740/.tmp/ns/abe95fdc80a3411398968ae7f39d9b63 2024-11-13T00:29:14,503 DEBUG [RS_CLOSE_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36657/user/jenkins/test-data/90fad67b-44a6-07b6-9c5c-bf568afb6a9a/data/default/TestLogRolling-testSlowSyncLogRolling/a84351998556cbd83db95e9b5415ada0/recovered.edits/51.seqid, newMaxSeqId=51, maxSeqId=1 2024-11-13T00:29:14,506 INFO [RS_CLOSE_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testSlowSyncLogRolling,,1731457661899.a84351998556cbd83db95e9b5415ada0. 2024-11-13T00:29:14,506 DEBUG [RS_CLOSE_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for a84351998556cbd83db95e9b5415ada0: Waiting for close lock at 1731457754433Running coprocessor pre-close hooks at 1731457754433Disabling compacts and flushes for region at 1731457754433Disabling writes for close at 1731457754434 (+1 ms)Obtaining lock to block concurrent updates at 1731457754434Preparing flush snapshotting stores in a84351998556cbd83db95e9b5415ada0 at 1731457754434Finished memstore snapshotting TestLogRolling-testSlowSyncLogRolling,,1731457661899.a84351998556cbd83db95e9b5415ada0., syncing WAL and waiting on mvcc, flushsize=dataSize=3228, getHeapSize=3696, getOffHeapSize=0, getCellsCount=3 at 1731457754434Flushing stores of TestLogRolling-testSlowSyncLogRolling,,1731457661899.a84351998556cbd83db95e9b5415ada0. at 1731457754436 (+2 ms)Flushing a84351998556cbd83db95e9b5415ada0/info: creating writer at 1731457754436Flushing a84351998556cbd83db95e9b5415ada0/info: appending metadata at 1731457754440 (+4 ms)Flushing a84351998556cbd83db95e9b5415ada0/info: closing flushed file at 1731457754440Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@42d657c7: reopening flushed file at 1731457754457 (+17 ms)Finished flush of dataSize ~3.15 KB/3228, heapSize ~3.61 KB/3696, currentSize=0 B/0 for a84351998556cbd83db95e9b5415ada0 in 35ms, sequenceid=48, compaction requested=true at 1731457754470 (+13 ms)Writing region close event to WAL at 1731457754499 (+29 ms)Running coprocessor post-close hooks at 1731457754504 (+5 ms)Closed at 1731457754506 (+2 ms) 2024-11-13T00:29:14,507 DEBUG [RS_CLOSE_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testSlowSyncLogRolling,,1731457661899.a84351998556cbd83db95e9b5415ada0. 
2024-11-13T00:29:14,529 DEBUG [RS_CLOSE_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36657/user/jenkins/test-data/90fad67b-44a6-07b6-9c5c-bf568afb6a9a/data/hbase/meta/1588230740/.tmp/table/6a0abc8f225e4f59bb96d6d81538ddf2 is 73, key is TestLogRolling-testSlowSyncLogRolling/table:state/1731457662368/Put/seqid=0 2024-11-13T00:29:14,535 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36357 is added to blk_1073741853_1029 (size=5396) 2024-11-13T00:29:14,535 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34907 is added to blk_1073741853_1029 (size=5396) 2024-11-13T00:29:14,536 INFO [RS_CLOSE_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=138 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:36657/user/jenkins/test-data/90fad67b-44a6-07b6-9c5c-bf568afb6a9a/data/hbase/meta/1588230740/.tmp/table/6a0abc8f225e4f59bb96d6d81538ddf2 2024-11-13T00:29:14,545 DEBUG [RS_CLOSE_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36657/user/jenkins/test-data/90fad67b-44a6-07b6-9c5c-bf568afb6a9a/data/hbase/meta/1588230740/.tmp/info/fef37b7b1ec74c22861c9f733486719d as hdfs://localhost:36657/user/jenkins/test-data/90fad67b-44a6-07b6-9c5c-bf568afb6a9a/data/hbase/meta/1588230740/info/fef37b7b1ec74c22861c9f733486719d 2024-11-13T00:29:14,553 INFO [RS_CLOSE_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36657/user/jenkins/test-data/90fad67b-44a6-07b6-9c5c-bf568afb6a9a/data/hbase/meta/1588230740/info/fef37b7b1ec74c22861c9f733486719d, entries=10, sequenceid=11, filesize=6.9 K 2024-11-13T00:29:14,554 DEBUG [RS_CLOSE_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36657/user/jenkins/test-data/90fad67b-44a6-07b6-9c5c-bf568afb6a9a/data/hbase/meta/1588230740/.tmp/ns/abe95fdc80a3411398968ae7f39d9b63 as hdfs://localhost:36657/user/jenkins/test-data/90fad67b-44a6-07b6-9c5c-bf568afb6a9a/data/hbase/meta/1588230740/ns/abe95fdc80a3411398968ae7f39d9b63 2024-11-13T00:29:14,562 INFO [RS_CLOSE_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36657/user/jenkins/test-data/90fad67b-44a6-07b6-9c5c-bf568afb6a9a/data/hbase/meta/1588230740/ns/abe95fdc80a3411398968ae7f39d9b63, entries=2, sequenceid=11, filesize=5.0 K 2024-11-13T00:29:14,563 DEBUG [RS_CLOSE_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36657/user/jenkins/test-data/90fad67b-44a6-07b6-9c5c-bf568afb6a9a/data/hbase/meta/1588230740/.tmp/table/6a0abc8f225e4f59bb96d6d81538ddf2 as hdfs://localhost:36657/user/jenkins/test-data/90fad67b-44a6-07b6-9c5c-bf568afb6a9a/data/hbase/meta/1588230740/table/6a0abc8f225e4f59bb96d6d81538ddf2 2024-11-13T00:29:14,571 INFO [RS_CLOSE_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36657/user/jenkins/test-data/90fad67b-44a6-07b6-9c5c-bf568afb6a9a/data/hbase/meta/1588230740/table/6a0abc8f225e4f59bb96d6d81538ddf2, entries=2, sequenceid=11, filesize=5.3 K 2024-11-13T00:29:14,572 INFO 
[RS_CLOSE_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.65 KB/1692, heapSize ~3.38 KB/3456, currentSize=0 B/0 for 1588230740 in 137ms, sequenceid=11, compaction requested=false 2024-11-13T00:29:14,578 DEBUG [RS_CLOSE_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36657/user/jenkins/test-data/90fad67b-44a6-07b6-9c5c-bf568afb6a9a/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-13T00:29:14,579 DEBUG [RS_CLOSE_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-13T00:29:14,579 INFO [RS_CLOSE_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-13T00:29:14,579 DEBUG [RS_CLOSE_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731457754434Running coprocessor pre-close hooks at 1731457754434Disabling compacts and flushes for region at 1731457754434Disabling writes for close at 1731457754434Obtaining lock to block concurrent updates at 1731457754435 (+1 ms)Preparing flush snapshotting stores in 1588230740 at 1731457754435Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1692, getHeapSize=3696, getOffHeapSize=0, getCellsCount=14 at 1731457754435Flushing stores of hbase:meta,,1.1588230740 at 1731457754436 (+1 ms)Flushing 1588230740/info: creating writer at 1731457754436Flushing 1588230740/info: appending metadata at 1731457754457 (+21 ms)Flushing 1588230740/info: closing flushed file at 1731457754457Flushing 1588230740/ns: creating writer at 1731457754473 (+16 ms)Flushing 1588230740/ns: appending metadata at 1731457754491 (+18 ms)Flushing 1588230740/ns: closing flushed file at 1731457754491Flushing 1588230740/table: creating writer at 1731457754507 (+16 ms)Flushing 1588230740/table: appending metadata at 1731457754528 (+21 ms)Flushing 1588230740/table: closing flushed file at 1731457754528Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4563030e: reopening flushed file at 1731457754544 (+16 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@32519fae: reopening flushed file at 1731457754553 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7cc4a74e: reopening flushed file at 1731457754562 (+9 ms)Finished flush of dataSize ~1.65 KB/1692, heapSize ~3.38 KB/3456, currentSize=0 B/0 for 1588230740 in 137ms, sequenceid=11, compaction requested=false at 1731457754572 (+10 ms)Writing region close event to WAL at 1731457754574 (+2 ms)Running coprocessor post-close hooks at 1731457754579 (+5 ms)Closed at 1731457754579 2024-11-13T00:29:14,579 DEBUG [RS_CLOSE_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-13T00:29:14,635 INFO [RS:0;cb28580903fb:35769 {}] regionserver.HRegionServer(976): stopping server cb28580903fb,35769,1731457659784; all regions closed. 
2024-11-13T00:29:14,637 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:29:14,637 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:29:14,637 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:29:14,637 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:29:14,637 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:29:14,640 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36357 is added to blk_1073741834_1010 (size=3066) 2024-11-13T00:29:14,640 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34907 is added to blk_1073741834_1010 (size=3066) 2024-11-13T00:29:14,645 DEBUG [RS:0;cb28580903fb:35769 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/90fad67b-44a6-07b6-9c5c-bf568afb6a9a/oldWALs 2024-11-13T00:29:14,645 INFO [RS:0;cb28580903fb:35769 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog cb28580903fb%2C35769%2C1731457659784.meta:.meta(num 1731457661470) 2024-11-13T00:29:14,645 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:29:14,645 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:29:14,645 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:29:14,646 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:29:14,646 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:29:14,648 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36357 is added to blk_1073741847_1023 (size=12695) 2024-11-13T00:29:14,648 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34907 is added to blk_1073741847_1023 (size=12695) 2024-11-13T00:29:14,653 DEBUG [RS:0;cb28580903fb:35769 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/90fad67b-44a6-07b6-9c5c-bf568afb6a9a/oldWALs 2024-11-13T00:29:14,653 INFO [RS:0;cb28580903fb:35769 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog cb28580903fb%2C35769%2C1731457659784:(num 1731457734384) 2024-11-13T00:29:14,653 DEBUG [RS:0;cb28580903fb:35769 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T00:29:14,653 INFO [RS:0;cb28580903fb:35769 {}] regionserver.LeaseManager(133): Closed leases 2024-11-13T00:29:14,653 INFO [RS:0;cb28580903fb:35769 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-13T00:29:14,654 INFO [RS:0;cb28580903fb:35769 {}] hbase.ChoreService(370): Chore service for: regionserver/cb28580903fb:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-13T00:29:14,654 INFO [RS:0;cb28580903fb:35769 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-13T00:29:14,654 INFO [regionserver/cb28580903fb:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-13T00:29:14,654 INFO [RS:0;cb28580903fb:35769 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:35769 2024-11-13T00:29:14,659 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35075-0x1010eb918400000, quorum=127.0.0.1:64321, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-13T00:29:14,659 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35769-0x1010eb918400001, quorum=127.0.0.1:64321, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/cb28580903fb,35769,1731457659784 2024-11-13T00:29:14,659 INFO [RS:0;cb28580903fb:35769 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-13T00:29:14,661 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [cb28580903fb,35769,1731457659784] 2024-11-13T00:29:14,664 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/cb28580903fb,35769,1731457659784 already deleted, retry=false 2024-11-13T00:29:14,665 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; cb28580903fb,35769,1731457659784 expired; onlineServers=0 2024-11-13T00:29:14,665 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'cb28580903fb,35075,1731457659104' ***** 2024-11-13T00:29:14,665 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-13T00:29:14,665 INFO [M:0;cb28580903fb:35075 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-13T00:29:14,665 INFO [M:0;cb28580903fb:35075 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-13T00:29:14,665 DEBUG [M:0;cb28580903fb:35075 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-13T00:29:14,665 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-13T00:29:14,665 DEBUG [M:0;cb28580903fb:35075 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-13T00:29:14,665 DEBUG [master/cb28580903fb:0:becomeActiveMaster-HFileCleaner.small.0-1731457660730 {}] cleaner.HFileCleaner(306): Exit Thread[master/cb28580903fb:0:becomeActiveMaster-HFileCleaner.small.0-1731457660730,5,FailOnTimeoutGroup] 2024-11-13T00:29:14,665 DEBUG [master/cb28580903fb:0:becomeActiveMaster-HFileCleaner.large.0-1731457660725 {}] cleaner.HFileCleaner(306): Exit Thread[master/cb28580903fb:0:becomeActiveMaster-HFileCleaner.large.0-1731457660725,5,FailOnTimeoutGroup] 2024-11-13T00:29:14,666 INFO [M:0;cb28580903fb:35075 {}] hbase.ChoreService(370): Chore service for: master/cb28580903fb:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-13T00:29:14,666 INFO [M:0;cb28580903fb:35075 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-13T00:29:14,666 DEBUG [M:0;cb28580903fb:35075 {}] master.HMaster(1795): Stopping service threads 2024-11-13T00:29:14,666 INFO [M:0;cb28580903fb:35075 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-13T00:29:14,666 INFO [M:0;cb28580903fb:35075 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-13T00:29:14,667 INFO [M:0;cb28580903fb:35075 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-13T00:29:14,667 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-13T00:29:14,667 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35075-0x1010eb918400000, quorum=127.0.0.1:64321, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-13T00:29:14,667 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35075-0x1010eb918400000, quorum=127.0.0.1:64321, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T00:29:14,668 DEBUG [M:0;cb28580903fb:35075 {}] zookeeper.ZKUtil(347): master:35075-0x1010eb918400000, quorum=127.0.0.1:64321, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-13T00:29:14,668 WARN [M:0;cb28580903fb:35075 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-13T00:29:14,669 INFO [M:0;cb28580903fb:35075 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:36657/user/jenkins/test-data/90fad67b-44a6-07b6-9c5c-bf568afb6a9a/.lastflushedseqids 2024-11-13T00:29:14,683 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34907 is added to blk_1073741854_1030 (size=130) 2024-11-13T00:29:14,684 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36357 is added to blk_1073741854_1030 (size=130) 2024-11-13T00:29:14,684 INFO [M:0;cb28580903fb:35075 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-13T00:29:14,684 INFO [M:0;cb28580903fb:35075 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-13T00:29:14,685 DEBUG [M:0;cb28580903fb:35075 {}] regionserver.HRegion(1722): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-13T00:29:14,685 INFO [M:0;cb28580903fb:35075 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-13T00:29:14,685 DEBUG [M:0;cb28580903fb:35075 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-13T00:29:14,685 DEBUG [M:0;cb28580903fb:35075 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-13T00:29:14,685 DEBUG [M:0;cb28580903fb:35075 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-13T00:29:14,685 INFO [M:0;cb28580903fb:35075 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.02 KB heapSize=29.20 KB 2024-11-13T00:29:14,704 DEBUG [M:0;cb28580903fb:35075 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36657/user/jenkins/test-data/90fad67b-44a6-07b6-9c5c-bf568afb6a9a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/43cb3e45000248629a9027247a7ec443 is 82, key is hbase:meta,,1/info:regioninfo/1731457661554/Put/seqid=0 2024-11-13T00:29:14,711 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36357 is added to blk_1073741855_1031 (size=5672) 2024-11-13T00:29:14,711 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34907 is added to blk_1073741855_1031 (size=5672) 2024-11-13T00:29:14,712 INFO [M:0;cb28580903fb:35075 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:36657/user/jenkins/test-data/90fad67b-44a6-07b6-9c5c-bf568afb6a9a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/43cb3e45000248629a9027247a7ec443 2024-11-13T00:29:14,736 DEBUG [M:0;cb28580903fb:35075 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36657/user/jenkins/test-data/90fad67b-44a6-07b6-9c5c-bf568afb6a9a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/9042668510b84635b2f87d457c2b7da7 is 766, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731457662377/Put/seqid=0 2024-11-13T00:29:14,743 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36357 is added to blk_1073741856_1032 (size=6247) 2024-11-13T00:29:14,743 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34907 is added to blk_1073741856_1032 (size=6247) 2024-11-13T00:29:14,762 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35769-0x1010eb918400001, quorum=127.0.0.1:64321, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-13T00:29:14,762 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35769-0x1010eb918400001, quorum=127.0.0.1:64321, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-13T00:29:14,762 INFO [RS:0;cb28580903fb:35769 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-13T00:29:14,763 INFO [RS:0;cb28580903fb:35769 {}] regionserver.HRegionServer(1031): Exiting; stopping=cb28580903fb,35769,1731457659784; zookeeper connection closed. 
2024-11-13T00:29:14,763 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@38a919fc {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@38a919fc 2024-11-13T00:29:14,763 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-13T00:29:14,913 INFO [regionserver/cb28580903fb:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-13T00:29:15,144 INFO [M:0;cb28580903fb:35075 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.42 KB at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:36657/user/jenkins/test-data/90fad67b-44a6-07b6-9c5c-bf568afb6a9a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/9042668510b84635b2f87d457c2b7da7 2024-11-13T00:29:15,151 INFO [M:0;cb28580903fb:35075 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 9042668510b84635b2f87d457c2b7da7 2024-11-13T00:29:15,166 DEBUG [M:0;cb28580903fb:35075 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36657/user/jenkins/test-data/90fad67b-44a6-07b6-9c5c-bf568afb6a9a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/96daf63fe6d94d35b263900e6147dd18 is 69, key is cb28580903fb,35769,1731457659784/rs:state/1731457660818/Put/seqid=0 2024-11-13T00:29:15,172 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34907 is added to blk_1073741857_1033 (size=5156) 2024-11-13T00:29:15,172 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36357 is added to blk_1073741857_1033 (size=5156) 2024-11-13T00:29:15,173 INFO [M:0;cb28580903fb:35075 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:36657/user/jenkins/test-data/90fad67b-44a6-07b6-9c5c-bf568afb6a9a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/96daf63fe6d94d35b263900e6147dd18 2024-11-13T00:29:15,194 DEBUG [M:0;cb28580903fb:35075 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36657/user/jenkins/test-data/90fad67b-44a6-07b6-9c5c-bf568afb6a9a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/b44c5cafe0fa42f894aff23cc0bf2443 is 52, key is load_balancer_on/state:d/1731457661859/Put/seqid=0 2024-11-13T00:29:15,200 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36357 is added to blk_1073741858_1034 (size=5056) 2024-11-13T00:29:15,201 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34907 is added to blk_1073741858_1034 (size=5056) 2024-11-13T00:29:15,202 INFO [M:0;cb28580903fb:35075 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:36657/user/jenkins/test-data/90fad67b-44a6-07b6-9c5c-bf568afb6a9a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/b44c5cafe0fa42f894aff23cc0bf2443 2024-11-13T00:29:15,211 DEBUG [M:0;cb28580903fb:35075 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36657/user/jenkins/test-data/90fad67b-44a6-07b6-9c5c-bf568afb6a9a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/43cb3e45000248629a9027247a7ec443 as 
hdfs://localhost:36657/user/jenkins/test-data/90fad67b-44a6-07b6-9c5c-bf568afb6a9a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/43cb3e45000248629a9027247a7ec443 2024-11-13T00:29:15,219 INFO [M:0;cb28580903fb:35075 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36657/user/jenkins/test-data/90fad67b-44a6-07b6-9c5c-bf568afb6a9a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/43cb3e45000248629a9027247a7ec443, entries=8, sequenceid=59, filesize=5.5 K 2024-11-13T00:29:15,220 DEBUG [M:0;cb28580903fb:35075 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36657/user/jenkins/test-data/90fad67b-44a6-07b6-9c5c-bf568afb6a9a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/9042668510b84635b2f87d457c2b7da7 as hdfs://localhost:36657/user/jenkins/test-data/90fad67b-44a6-07b6-9c5c-bf568afb6a9a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/9042668510b84635b2f87d457c2b7da7 2024-11-13T00:29:15,227 INFO [M:0;cb28580903fb:35075 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 9042668510b84635b2f87d457c2b7da7 2024-11-13T00:29:15,227 INFO [M:0;cb28580903fb:35075 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36657/user/jenkins/test-data/90fad67b-44a6-07b6-9c5c-bf568afb6a9a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/9042668510b84635b2f87d457c2b7da7, entries=6, sequenceid=59, filesize=6.1 K 2024-11-13T00:29:15,228 DEBUG [M:0;cb28580903fb:35075 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36657/user/jenkins/test-data/90fad67b-44a6-07b6-9c5c-bf568afb6a9a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/96daf63fe6d94d35b263900e6147dd18 as hdfs://localhost:36657/user/jenkins/test-data/90fad67b-44a6-07b6-9c5c-bf568afb6a9a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/96daf63fe6d94d35b263900e6147dd18 2024-11-13T00:29:15,234 INFO [M:0;cb28580903fb:35075 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36657/user/jenkins/test-data/90fad67b-44a6-07b6-9c5c-bf568afb6a9a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/96daf63fe6d94d35b263900e6147dd18, entries=1, sequenceid=59, filesize=5.0 K 2024-11-13T00:29:15,236 DEBUG [M:0;cb28580903fb:35075 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36657/user/jenkins/test-data/90fad67b-44a6-07b6-9c5c-bf568afb6a9a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/b44c5cafe0fa42f894aff23cc0bf2443 as hdfs://localhost:36657/user/jenkins/test-data/90fad67b-44a6-07b6-9c5c-bf568afb6a9a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/b44c5cafe0fa42f894aff23cc0bf2443 2024-11-13T00:29:15,241 INFO [M:0;cb28580903fb:35075 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36657/user/jenkins/test-data/90fad67b-44a6-07b6-9c5c-bf568afb6a9a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/b44c5cafe0fa42f894aff23cc0bf2443, entries=1, sequenceid=59, filesize=4.9 K 2024-11-13T00:29:15,243 INFO [M:0;cb28580903fb:35075 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.02 KB/23576, heapSize ~29.14 KB/29840, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 558ms, sequenceid=59, compaction requested=false 2024-11-13T00:29:15,244 INFO [M:0;cb28580903fb:35075 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-13T00:29:15,245 DEBUG [M:0;cb28580903fb:35075 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731457754685Disabling compacts and flushes for region at 1731457754685Disabling writes for close at 1731457754685Obtaining lock to block concurrent updates at 1731457754685Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731457754685Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23576, getHeapSize=29840, getOffHeapSize=0, getCellsCount=70 at 1731457754686 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1731457754686Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731457754687 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731457754703 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731457754703Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731457754719 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731457754735 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731457754735Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731457755151 (+416 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731457755166 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731457755166Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731457755178 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731457755193 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731457755193Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@67192fd: reopening flushed file at 1731457755210 (+17 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4262a09b: reopening flushed file at 1731457755219 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@689a4fac: reopening flushed file at 1731457755227 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@180fd209: reopening flushed file at 1731457755235 (+8 ms)Finished flush of dataSize ~23.02 KB/23576, heapSize ~29.14 KB/29840, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 558ms, sequenceid=59, compaction requested=false at 1731457755243 (+8 ms)Writing region close event to WAL at 1731457755244 (+1 ms)Closed at 1731457755244 2024-11-13T00:29:15,245 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:29:15,246 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:29:15,246 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:29:15,246 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:29:15,246 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:29:15,248 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36357 is added to blk_1073741830_1006 (size=27973) 2024-11-13T00:29:15,249 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34907 is added to blk_1073741830_1006 (size=27973) 2024-11-13T00:29:15,250 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-13T00:29:15,250 INFO [M:0;cb28580903fb:35075 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-13T00:29:15,250 INFO [M:0;cb28580903fb:35075 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:35075 2024-11-13T00:29:15,250 INFO [M:0;cb28580903fb:35075 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-13T00:29:15,353 INFO [M:0;cb28580903fb:35075 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-13T00:29:15,353 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35075-0x1010eb918400000, quorum=127.0.0.1:64321, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-13T00:29:15,353 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35075-0x1010eb918400000, quorum=127.0.0.1:64321, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-13T00:29:15,358 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@78ec6c63{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-13T00:29:15,360 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@633713c3{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-13T00:29:15,360 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-13T00:29:15,360 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@29058af{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-13T00:29:15,360 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@154eec55{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/efb032a9-b881-862e-342e-4f19ce7d56d5/hadoop.log.dir/,STOPPED} 2024-11-13T00:29:15,364 WARN [BP-1622935336-172.17.0.2-1731457656287 heartbeating to localhost/127.0.0.1:36657 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-13T00:29:15,364 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-13T00:29:15,364 WARN [BP-1622935336-172.17.0.2-1731457656287 heartbeating to localhost/127.0.0.1:36657 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1622935336-172.17.0.2-1731457656287 (Datanode Uuid 1f745f00-d095-4aeb-861e-a30a9e71ed37) service to localhost/127.0.0.1:36657 2024-11-13T00:29:15,364 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-13T00:29:15,365 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/efb032a9-b881-862e-342e-4f19ce7d56d5/cluster_282f6315-ffb3-e324-cb38-81a66cca1879/data/data3/current/BP-1622935336-172.17.0.2-1731457656287 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-13T00:29:15,365 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/efb032a9-b881-862e-342e-4f19ce7d56d5/cluster_282f6315-ffb3-e324-cb38-81a66cca1879/data/data4/current/BP-1622935336-172.17.0.2-1731457656287 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-13T00:29:15,366 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-13T00:29:15,368 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3b32dfc5{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-13T00:29:15,369 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@43ebd249{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-13T00:29:15,369 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-13T00:29:15,369 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6e911877{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-13T00:29:15,369 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@460757e7{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/efb032a9-b881-862e-342e-4f19ce7d56d5/hadoop.log.dir/,STOPPED} 2024-11-13T00:29:15,371 WARN [BP-1622935336-172.17.0.2-1731457656287 heartbeating to localhost/127.0.0.1:36657 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-13T00:29:15,371 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-13T00:29:15,371 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-13T00:29:15,371 WARN [BP-1622935336-172.17.0.2-1731457656287 heartbeating to localhost/127.0.0.1:36657 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1622935336-172.17.0.2-1731457656287 (Datanode Uuid 4ad17a83-175b-4dbe-82fa-6ce1a0b71e83) service to localhost/127.0.0.1:36657 2024-11-13T00:29:15,371 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/efb032a9-b881-862e-342e-4f19ce7d56d5/cluster_282f6315-ffb3-e324-cb38-81a66cca1879/data/data1/current/BP-1622935336-172.17.0.2-1731457656287 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-13T00:29:15,372 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/efb032a9-b881-862e-342e-4f19ce7d56d5/cluster_282f6315-ffb3-e324-cb38-81a66cca1879/data/data2/current/BP-1622935336-172.17.0.2-1731457656287 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-13T00:29:15,372 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-13T00:29:15,381 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7c1a236c{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-13T00:29:15,382 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5a625720{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-13T00:29:15,382 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-13T00:29:15,382 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5ae7f863{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-13T00:29:15,382 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7c5202f3{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/efb032a9-b881-862e-342e-4f19ce7d56d5/hadoop.log.dir/,STOPPED} 2024-11-13T00:29:15,392 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-13T00:29:15,423 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-13T00:29:15,434 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testSlowSyncLogRolling Thread=79 (was 12) Potentially hanging thread: IPC Client (599498808) connection to localhost/127.0.0.1:36657 from jenkins.hfs.0 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: HMaster-EventLoopGroup-1-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-4-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: Idle-Rpc-Conn-Sweeper-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RpcClient-timer-pool-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-4 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: SSL Certificates Store Monitor java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: nioEventLoopGroup-5-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:36657 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-4-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-1-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:36657 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: SessionTracker java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Potentially hanging thread: HBase-Metrics2-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-1 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-1-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Time-limited test.named-queue-events-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: org.apache.hadoop.hdfs.PeerCache@6a73af6c java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async-Client-Retry-Timer-pool-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Timer for 'HBase' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: SnapshotHandlerChoreCleaner java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (599498808) connection to localhost/127.0.0.1:36657 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-3-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: LeaseRenewer:jenkins@localhost:36657 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.0@localhost:36657 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: nioEventLoopGroup-5-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: regionserver/cb28580903fb:0.procedureResultReporter java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Potentially hanging thread: ForkJoinPool-2-worker-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) 
app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: ForkJoinPool-2-worker-3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Potentially hanging thread: master/cb28580903fb:0:becomeActiveMaster-MemStoreChunkPool Statistics java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-3-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: master/cb28580903fb:0:becomeActiveMaster-MemStoreChunkPool Statistics java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging 
thread: nioEventLoopGroup-4-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: nioEventLoopGroup-2-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-3-2 
java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Monitor thread for TaskMonitor java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: weak-ref-cleaner-strictcontextstorage java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: region-location-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:36657 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: nioEventLoopGroup-5-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) 
app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (599498808) connection to localhost/127.0.0.1:36657 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-2-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-2-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=402 (was 287) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=207 (was 141) - SystemLoadAverage LEAK? 
-, ProcessCount=11 (was 11), AvailableMemoryMB=1362 (was 1771)
2024-11-13T00:29:15,443 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnDatanodeDeath Thread=80, OpenFileDescriptor=402, MaxFileDescriptor=1048576, SystemLoadAverage=207, ProcessCount=11, AvailableMemoryMB=1361
2024-11-13T00:29:15,443 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false}
2024-11-13T00:29:15,443 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/efb032a9-b881-862e-342e-4f19ce7d56d5/hadoop.log.dir so I do NOT create it in target/test-data/70c4de87-ce39-31b0-a6cc-d80ffc185cb3
2024-11-13T00:29:15,443 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/efb032a9-b881-862e-342e-4f19ce7d56d5/hadoop.tmp.dir so I do NOT create it in target/test-data/70c4de87-ce39-31b0-a6cc-d80ffc185cb3
2024-11-13T00:29:15,443 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/70c4de87-ce39-31b0-a6cc-d80ffc185cb3/cluster_e31acad2-208e-f92d-9245-ea2733991be8, deleteOnExit=true
2024-11-13T00:29:15,443 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS
2024-11-13T00:29:15,444 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/70c4de87-ce39-31b0-a6cc-d80ffc185cb3/test.cache.data in system properties and HBase conf
2024-11-13T00:29:15,444 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/70c4de87-ce39-31b0-a6cc-d80ffc185cb3/hadoop.tmp.dir in system properties and HBase conf
2024-11-13T00:29:15,444 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/70c4de87-ce39-31b0-a6cc-d80ffc185cb3/hadoop.log.dir in system properties and HBase conf
2024-11-13T00:29:15,444 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/70c4de87-ce39-31b0-a6cc-d80ffc185cb3/mapreduce.cluster.local.dir in system properties and HBase conf
2024-11-13T00:29:15,444 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/70c4de87-ce39-31b0-a6cc-d80ffc185cb3/mapreduce.cluster.temp.dir in system properties and HBase conf
2024-11-13T00:29:15,444 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF
2024-11-13T00:29:15,444 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering
2024-11-13T00:29:15,445 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/70c4de87-ce39-31b0-a6cc-d80ffc185cb3/yarn.node-labels.fs-store.root-dir in system properties and HBase conf
2024-11-13T00:29:15,445 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/70c4de87-ce39-31b0-a6cc-d80ffc185cb3/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf
2024-11-13T00:29:15,445 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/70c4de87-ce39-31b0-a6cc-d80ffc185cb3/yarn.nodemanager.log-dirs in system properties and HBase conf
2024-11-13T00:29:15,445 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/70c4de87-ce39-31b0-a6cc-d80ffc185cb3/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf
2024-11-13T00:29:15,445 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/70c4de87-ce39-31b0-a6cc-d80ffc185cb3/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf
2024-11-13T00:29:15,445 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/70c4de87-ce39-31b0-a6cc-d80ffc185cb3/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf
2024-11-13T00:29:15,445 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/70c4de87-ce39-31b0-a6cc-d80ffc185cb3/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf
2024-11-13T00:29:15,445 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/70c4de87-ce39-31b0-a6cc-d80ffc185cb3/dfs.journalnode.edits.dir in system properties and HBase conf
2024-11-13T00:29:15,445 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/70c4de87-ce39-31b0-a6cc-d80ffc185cb3/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf
2024-11-13T00:29:15,445 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/70c4de87-ce39-31b0-a6cc-d80ffc185cb3/nfs.dump.dir in system properties and HBase conf
2024-11-13T00:29:15,445 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/70c4de87-ce39-31b0-a6cc-d80ffc185cb3/java.io.tmpdir in system properties and HBase conf
2024-11-13T00:29:15,446 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/70c4de87-ce39-31b0-a6cc-d80ffc185cb3/dfs.journalnode.edits.dir in system properties and HBase conf
2024-11-13T00:29:15,446 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/70c4de87-ce39-31b0-a6cc-d80ffc185cb3/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf
2024-11-13T00:29:15,446 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/70c4de87-ce39-31b0-a6cc-d80ffc185cb3/fs.s3a.committer.staging.tmp.path in system properties and HBase conf
2024-11-13T00:29:15,461 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000.
2024-11-13T00:29:15,542 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-11-13T00:29:15,550 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-11-13T00:29:15,552 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-11-13T00:29:15,552 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-11-13T00:29:15,552 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms
2024-11-13T00:29:15,553 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-11-13T00:29:15,553 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@19bf40f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/70c4de87-ce39-31b0-a6cc-d80ffc185cb3/hadoop.log.dir/,AVAILABLE}
2024-11-13T00:29:15,554 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1c2cf0b6{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-11-13T00:29:15,676 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@6c790f33{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/70c4de87-ce39-31b0-a6cc-d80ffc185cb3/java.io.tmpdir/jetty-localhost-38357-hadoop-hdfs-3_4_1-tests_jar-_-any-15463453730898298473/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-11-13T00:29:15,677 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@488dcf65{HTTP/1.1, (http/1.1)}{localhost:38357}
2024-11-13T00:29:15,677 INFO [Time-limited test {}] server.Server(415): Started @101525ms
2024-11-13T00:29:15,695 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000.
2024-11-13T00:29:15,804 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-11-13T00:29:15,816 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-11-13T00:29:15,824 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-11-13T00:29:15,824 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-11-13T00:29:15,824 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms
2024-11-13T00:29:15,829 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@232ba44{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/70c4de87-ce39-31b0-a6cc-d80ffc185cb3/hadoop.log.dir/,AVAILABLE}
2024-11-13T00:29:15,829 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@695feae9{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-11-13T00:29:15,972 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2a90f125{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/70c4de87-ce39-31b0-a6cc-d80ffc185cb3/java.io.tmpdir/jetty-localhost-44161-hadoop-hdfs-3_4_1-tests_jar-_-any-17646577438616629154/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-13T00:29:15,972 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@7b8beccb{HTTP/1.1, (http/1.1)}{localhost:44161}
2024-11-13T00:29:15,973 INFO [Time-limited test {}] server.Server(415): Started @101821ms
2024-11-13T00:29:15,975 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
2024-11-13T00:29:16,024 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-11-13T00:29:16,028 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-11-13T00:29:16,030 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-11-13T00:29:16,031 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-11-13T00:29:16,031 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms
2024-11-13T00:29:16,031 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@74fcc9e4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/70c4de87-ce39-31b0-a6cc-d80ffc185cb3/hadoop.log.dir/,AVAILABLE}
2024-11-13T00:29:16,032 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3d87dfcd{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-11-13T00:29:16,101 WARN [Thread-437 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/70c4de87-ce39-31b0-a6cc-d80ffc185cb3/cluster_e31acad2-208e-f92d-9245-ea2733991be8/data/data2/current/BP-549882482-172.17.0.2-1731457755480/current, will proceed with Du for space computation calculation,
2024-11-13T00:29:16,101 WARN [Thread-436 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/70c4de87-ce39-31b0-a6cc-d80ffc185cb3/cluster_e31acad2-208e-f92d-9245-ea2733991be8/data/data1/current/BP-549882482-172.17.0.2-1731457755480/current, will proceed with Du for space computation calculation,
2024-11-13T00:29:16,145 WARN [Thread-415 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec.
Assuming default value of -1 2024-11-13T00:29:16,150 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x898f13919c31bf32 with lease ID 0x103aac0a3991332d: Processing first storage report for DS-f35b6f9a-c993-47cf-b458-983873360b28 from datanode DatanodeRegistration(127.0.0.1:34995, datanodeUuid=dfbfb49f-5683-4d86-8bee-a0c650df1f89, infoPort=40447, infoSecurePort=0, ipcPort=46663, storageInfo=lv=-57;cid=testClusterID;nsid=1732356154;c=1731457755480) 2024-11-13T00:29:16,151 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x898f13919c31bf32 with lease ID 0x103aac0a3991332d: from storage DS-f35b6f9a-c993-47cf-b458-983873360b28 node DatanodeRegistration(127.0.0.1:34995, datanodeUuid=dfbfb49f-5683-4d86-8bee-a0c650df1f89, infoPort=40447, infoSecurePort=0, ipcPort=46663, storageInfo=lv=-57;cid=testClusterID;nsid=1732356154;c=1731457755480), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-13T00:29:16,151 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x898f13919c31bf32 with lease ID 0x103aac0a3991332d: Processing first storage report for DS-cdea724a-4ef9-43c6-a212-8251f84acfed from datanode DatanodeRegistration(127.0.0.1:34995, datanodeUuid=dfbfb49f-5683-4d86-8bee-a0c650df1f89, infoPort=40447, infoSecurePort=0, ipcPort=46663, storageInfo=lv=-57;cid=testClusterID;nsid=1732356154;c=1731457755480) 2024-11-13T00:29:16,151 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x898f13919c31bf32 with lease ID 0x103aac0a3991332d: from storage DS-cdea724a-4ef9-43c6-a212-8251f84acfed node DatanodeRegistration(127.0.0.1:34995, datanodeUuid=dfbfb49f-5683-4d86-8bee-a0c650df1f89, infoPort=40447, infoSecurePort=0, ipcPort=46663, storageInfo=lv=-57;cid=testClusterID;nsid=1732356154;c=1731457755480), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-13T00:29:16,173 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@d071559{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/70c4de87-ce39-31b0-a6cc-d80ffc185cb3/java.io.tmpdir/jetty-localhost-43073-hadoop-hdfs-3_4_1-tests_jar-_-any-14006520406763589483/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-13T00:29:16,173 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1ece158b{HTTP/1.1, (http/1.1)}{localhost:43073} 2024-11-13T00:29:16,173 INFO [Time-limited test {}] server.Server(415): Started @102021ms 2024-11-13T00:29:16,176 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
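The datanode Jetty servers, block-pool slices, and first block reports traced above come from Hadoop's MiniDFSCluster test harness, which the HBaseTestingUtil setup drives after pointing the dfs.* paths at the per-test data directory. A minimal, self-contained sketch of starting such a two-datanode mini cluster directly is shown below; the base directory and HDFS paths are placeholders chosen to mirror this run, not the harness's real code.

```java
// Minimal sketch (not the HBase test code itself): start a two-datanode
// MiniDFSCluster rooted under a throwaway test-data directory, the way the
// mini-DFS step of the test harness does. Paths here are illustrative.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.MiniDFSCluster;

public class MiniDfsSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Keep all NameNode/DataNode state under a test-data dir, as the log shows.
    conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, "/tmp/test-data/cluster");

    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
        .numDataNodes(2)   // the run above reports block replicas on two datanodes
        .build();
    cluster.waitActive();  // wait for datanodes to register and send block reports

    FileSystem fs = cluster.getFileSystem();
    fs.mkdirs(new Path("/user/jenkins/test-data"));
    System.out.println("NameNode RPC port: " + cluster.getNameNodePort());

    cluster.shutdown();
  }
}
```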
2024-11-13T00:29:16,298 WARN [Thread-463 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/70c4de87-ce39-31b0-a6cc-d80ffc185cb3/cluster_e31acad2-208e-f92d-9245-ea2733991be8/data/data4/current/BP-549882482-172.17.0.2-1731457755480/current, will proceed with Du for space computation calculation, 2024-11-13T00:29:16,298 WARN [Thread-462 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/70c4de87-ce39-31b0-a6cc-d80ffc185cb3/cluster_e31acad2-208e-f92d-9245-ea2733991be8/data/data3/current/BP-549882482-172.17.0.2-1731457755480/current, will proceed with Du for space computation calculation, 2024-11-13T00:29:16,317 WARN [Thread-451 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-13T00:29:16,320 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xbd4474cae6ae69a6 with lease ID 0x103aac0a3991332e: Processing first storage report for DS-b79e0e58-5599-43c8-ac33-03595ae81da9 from datanode DatanodeRegistration(127.0.0.1:42473, datanodeUuid=6543de5f-8518-406c-be1d-2c4860a53ea5, infoPort=45975, infoSecurePort=0, ipcPort=38545, storageInfo=lv=-57;cid=testClusterID;nsid=1732356154;c=1731457755480) 2024-11-13T00:29:16,321 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xbd4474cae6ae69a6 with lease ID 0x103aac0a3991332e: from storage DS-b79e0e58-5599-43c8-ac33-03595ae81da9 node DatanodeRegistration(127.0.0.1:42473, datanodeUuid=6543de5f-8518-406c-be1d-2c4860a53ea5, infoPort=45975, infoSecurePort=0, ipcPort=38545, storageInfo=lv=-57;cid=testClusterID;nsid=1732356154;c=1731457755480), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-13T00:29:16,321 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xbd4474cae6ae69a6 with lease ID 0x103aac0a3991332e: Processing first storage report for DS-8bb00cc8-ab2b-426d-ad9d-f663cbe44821 from datanode DatanodeRegistration(127.0.0.1:42473, datanodeUuid=6543de5f-8518-406c-be1d-2c4860a53ea5, infoPort=45975, infoSecurePort=0, ipcPort=38545, storageInfo=lv=-57;cid=testClusterID;nsid=1732356154;c=1731457755480) 2024-11-13T00:29:16,321 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xbd4474cae6ae69a6 with lease ID 0x103aac0a3991332e: from storage DS-8bb00cc8-ab2b-426d-ad9d-f663cbe44821 node DatanodeRegistration(127.0.0.1:42473, datanodeUuid=6543de5f-8518-406c-be1d-2c4860a53ea5, infoPort=45975, infoSecurePort=0, ipcPort=38545, storageInfo=lv=-57;cid=testClusterID;nsid=1732356154;c=1731457755480), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-13T00:29:16,337 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/70c4de87-ce39-31b0-a6cc-d80ffc185cb3 2024-11-13T00:29:16,340 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/70c4de87-ce39-31b0-a6cc-d80ffc185cb3/cluster_e31acad2-208e-f92d-9245-ea2733991be8/zookeeper_0, clientPort=50605, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/70c4de87-ce39-31b0-a6cc-d80ffc185cb3/cluster_e31acad2-208e-f92d-9245-ea2733991be8/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/70c4de87-ce39-31b0-a6cc-d80ffc185cb3/cluster_e31acad2-208e-f92d-9245-ea2733991be8/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-13T00:29:16,341 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=50605 2024-11-13T00:29:16,341 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T00:29:16,343 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T00:29:16,355 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073741825_1001 (size=7) 2024-11-13T00:29:16,355 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34995 is added to blk_1073741825_1001 (size=7) 2024-11-13T00:29:16,357 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:39361/user/jenkins/test-data/1471d553-8348-2caf-e00e-bdee45eeef84 with version=8 2024-11-13T00:29:16,357 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:36657/user/jenkins/test-data/90fad67b-44a6-07b6-9c5c-bf568afb6a9a/hbase-staging 2024-11-13T00:29:16,360 INFO [Time-limited test {}] client.ConnectionUtils(128): master/cb28580903fb:0 server-side Connection retries=45 2024-11-13T00:29:16,360 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-13T00:29:16,360 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-13T00:29:16,360 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-13T00:29:16,360 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-13T00:29:16,360 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-13T00:29:16,360 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-13T00:29:16,360 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-13T00:29:16,362 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:46591 2024-11-13T00:29:16,363 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:46591 connecting to ZooKeeper ensemble=127.0.0.1:50605 2024-11-13T00:29:16,369 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:465910x0, quorum=127.0.0.1:50605, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-13T00:29:16,370 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:46591-0x1010eba97420000 connected 2024-11-13T00:29:16,386 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T00:29:16,388 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T00:29:16,391 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:46591-0x1010eba97420000, quorum=127.0.0.1:50605, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-13T00:29:16,391 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:39361/user/jenkins/test-data/1471d553-8348-2caf-e00e-bdee45eeef84, hbase.cluster.distributed=false 2024-11-13T00:29:16,393 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:46591-0x1010eba97420000, quorum=127.0.0.1:50605, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-13T00:29:16,393 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=46591 2024-11-13T00:29:16,394 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=46591 2024-11-13T00:29:16,394 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=46591 2024-11-13T00:29:16,394 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=46591 2024-11-13T00:29:16,395 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=46591 2024-11-13T00:29:16,415 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/cb28580903fb:0 server-side Connection retries=45 2024-11-13T00:29:16,415 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-13T00:29:16,415 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-13T00:29:16,415 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-13T00:29:16,415 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-13T00:29:16,415 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-13T00:29:16,415 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-13T00:29:16,416 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-13T00:29:16,416 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:43209 2024-11-13T00:29:16,418 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:43209 connecting to ZooKeeper ensemble=127.0.0.1:50605 2024-11-13T00:29:16,419 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T00:29:16,421 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T00:29:16,427 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:432090x0, quorum=127.0.0.1:50605, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-13T00:29:16,428 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:432090x0, quorum=127.0.0.1:50605, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-13T00:29:16,428 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:43209-0x1010eba97420001 connected 2024-11-13T00:29:16,428 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-13T00:29:16,430 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-13T00:29:16,431 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:43209-0x1010eba97420001, quorum=127.0.0.1:50605, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-13T00:29:16,433 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:43209-0x1010eba97420001, quorum=127.0.0.1:50605, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-13T00:29:16,436 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=43209 2024-11-13T00:29:16,437 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=43209 2024-11-13T00:29:16,437 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=43209 2024-11-13T00:29:16,439 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=43209 2024-11-13T00:29:16,439 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=43209 2024-11-13T00:29:16,472 
DEBUG [M:0;cb28580903fb:46591 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;cb28580903fb:46591 2024-11-13T00:29:16,473 INFO [master/cb28580903fb:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/cb28580903fb,46591,1731457756359 2024-11-13T00:29:16,475 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43209-0x1010eba97420001, quorum=127.0.0.1:50605, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-13T00:29:16,475 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46591-0x1010eba97420000, quorum=127.0.0.1:50605, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-13T00:29:16,476 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:46591-0x1010eba97420000, quorum=127.0.0.1:50605, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/cb28580903fb,46591,1731457756359 2024-11-13T00:29:16,478 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43209-0x1010eba97420001, quorum=127.0.0.1:50605, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-13T00:29:16,478 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46591-0x1010eba97420000, quorum=127.0.0.1:50605, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T00:29:16,478 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43209-0x1010eba97420001, quorum=127.0.0.1:50605, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T00:29:16,481 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:46591-0x1010eba97420000, quorum=127.0.0.1:50605, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-13T00:29:16,482 INFO [master/cb28580903fb:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/cb28580903fb,46591,1731457756359 from backup master directory 2024-11-13T00:29:16,484 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43209-0x1010eba97420001, quorum=127.0.0.1:50605, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-13T00:29:16,484 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46591-0x1010eba97420000, quorum=127.0.0.1:50605, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/cb28580903fb,46591,1731457756359 2024-11-13T00:29:16,484 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46591-0x1010eba97420000, quorum=127.0.0.1:50605, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-13T00:29:16,484 WARN [master/cb28580903fb:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
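The repeated "Set watcher on znode that does not yet exist" lines above are the standard ZooKeeper pattern of registering a watch via exists() before the node is created, so that the later NodeCreated event for /hbase/running or /hbase/master wakes the waiting component. A small sketch of that pattern against the mini ensemble's client port follows; the port and znode path are taken from this log, but the surrounding class is illustrative only.

```java
// Sketch of the watch-before-create pattern the ZKUtil lines describe:
// register a watch via exists() on a znode that may not exist yet, then
// react to the NodeCreated event once the active master creates it.
import java.util.concurrent.CountDownLatch;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

public class RunningZnodeWatch {
  public static void main(String[] args) throws Exception {
    CountDownLatch created = new CountDownLatch(1);
    ZooKeeper zk = new ZooKeeper("127.0.0.1:50605", 30_000, event -> {
      // Connection-state events (SyncConnected) also arrive here, as in the log.
      if (event.getType() == Watcher.Event.EventType.NodeCreated
          && "/hbase/running".equals(event.getPath())) {
        created.countDown();
      }
    });

    // Setting a watch on a node that does not yet exist returns null,
    // which is exactly what the ZKUtil(113) lines are reporting above.
    if (zk.exists("/hbase/running", true) == null) {
      System.out.println("Watch set; /hbase/running not created yet");
    }
    created.await();   // fires once the master writes /hbase/running
    zk.close();
  }
}
```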
2024-11-13T00:29:16,484 INFO [master/cb28580903fb:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=cb28580903fb,46591,1731457756359 2024-11-13T00:29:16,494 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:39361/user/jenkins/test-data/1471d553-8348-2caf-e00e-bdee45eeef84/hbase.id] with ID: 65f42912-91bf-4737-afc9-57bffb50a1d5 2024-11-13T00:29:16,494 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:39361/user/jenkins/test-data/1471d553-8348-2caf-e00e-bdee45eeef84/.tmp/hbase.id 2024-11-13T00:29:16,502 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34995 is added to blk_1073741826_1002 (size=42) 2024-11-13T00:29:16,503 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073741826_1002 (size=42) 2024-11-13T00:29:16,503 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:39361/user/jenkins/test-data/1471d553-8348-2caf-e00e-bdee45eeef84/.tmp/hbase.id]:[hdfs://localhost:39361/user/jenkins/test-data/1471d553-8348-2caf-e00e-bdee45eeef84/hbase.id] 2024-11-13T00:29:16,524 INFO [master/cb28580903fb:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T00:29:16,524 INFO [master/cb28580903fb:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-13T00:29:16,526 INFO [master/cb28580903fb:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 
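The FSUtils lines above show the cluster ID being written to a temporary file under .tmp and then moved to hbase.id, the usual write-then-rename publish step that keeps readers from ever seeing a half-written file. A sketch of that step with the stock Hadoop FileSystem API follows; the filesystem URI, paths, and error handling are assumptions for illustration.

```java
// Sketch of the write-then-rename publish pattern used for hbase.id:
// write the content to a temporary path first, then rename it into place so
// readers only ever observe a complete file. URIs and paths are placeholders.
import java.nio.charset.StandardCharsets;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class PublishClusterId {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    conf.set("fs.defaultFS", "hdfs://localhost:39361");
    FileSystem fs = FileSystem.get(conf);

    Path tmp = new Path("/user/jenkins/test-data/rootdir/.tmp/hbase.id");
    Path dst = new Path("/user/jenkins/test-data/rootdir/hbase.id");

    try (FSDataOutputStream out = fs.create(tmp, true)) {
      out.write("65f42912-91bf-4737-afc9-57bffb50a1d5"
          .getBytes(StandardCharsets.UTF_8));
    }
    // rename() is the "move the temporary cluster ID file to its target
    // location" step reported in the log above.
    if (!fs.rename(tmp, dst)) {
      throw new java.io.IOException("could not publish " + dst);
    }
  }
}
```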
2024-11-13T00:29:16,529 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43209-0x1010eba97420001, quorum=127.0.0.1:50605, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T00:29:16,529 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46591-0x1010eba97420000, quorum=127.0.0.1:50605, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T00:29:16,543 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34995 is added to blk_1073741827_1003 (size=196) 2024-11-13T00:29:16,544 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073741827_1003 (size=196) 2024-11-13T00:29:16,544 INFO [master/cb28580903fb:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-13T00:29:16,545 INFO [master/cb28580903fb:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-13T00:29:16,545 INFO [master/cb28580903fb:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-13T00:29:16,557 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073741828_1004 (size=1189) 2024-11-13T00:29:16,557 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34995 is added to blk_1073741828_1004 (size=1189) 2024-11-13T00:29:16,558 INFO [master/cb28580903fb:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:39361/user/jenkins/test-data/1471d553-8348-2caf-e00e-bdee45eeef84/MasterData/data/master/store 2024-11-13T00:29:16,570 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073741829_1005 (size=34) 2024-11-13T00:29:16,571 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34995 is added to blk_1073741829_1005 (size=34) 2024-11-13T00:29:16,571 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T00:29:16,571 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-13T00:29:16,571 INFO [master/cb28580903fb:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-13T00:29:16,572 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-13T00:29:16,572 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-13T00:29:16,572 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-13T00:29:16,572 INFO [master/cb28580903fb:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
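The long descriptor dump above is the flattened toString of the master:store table descriptor. Expressed with the public HBase client builders, the 'info' and 'proc' families correspond roughly to the sketch below; attribute values are copied from the dump, but the actual construction inside MasterRegion is not shown in this log, so treat this as a reconstruction rather than the source.

```java
// Rough reconstruction of the 'info' and 'proc' families from the descriptor
// dump above, using the public HBase client builders. Illustrative only; this
// is not the code MasterRegion itself runs.
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class MasterStoreDescriptorSketch {
  public static TableDescriptor build() {
    ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("info"))
        .setMaxVersions(3)                                    // VERSIONS => '3'
        .setInMemory(true)                                    // IN_MEMORY => 'true'
        .setBlocksize(8 * 1024)                               // BLOCKSIZE => 8KB
        .setBloomFilterType(BloomType.ROWCOL)                 // BLOOMFILTER => 'ROWCOL'
        .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1) // DATA_BLOCK_ENCODING
        .build();

    ColumnFamilyDescriptor proc = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("proc"))
        .setMaxVersions(1)                                    // VERSIONS => '1'
        .setBloomFilterType(BloomType.ROW)                    // BLOOMFILTER => 'ROW'
        .build();                                             // 64KB blocksize is the default

    return TableDescriptorBuilder
        .newBuilder(TableName.valueOf("master", "store"))     // table 'master:store'
        .setColumnFamily(info)
        .setColumnFamily(proc)
        .build();
  }
}
```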
2024-11-13T00:29:16,572 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731457756571Disabling compacts and flushes for region at 1731457756571Disabling writes for close at 1731457756572 (+1 ms)Writing region close event to WAL at 1731457756572Closed at 1731457756572 2024-11-13T00:29:16,573 WARN [master/cb28580903fb:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:39361/user/jenkins/test-data/1471d553-8348-2caf-e00e-bdee45eeef84/MasterData/data/master/store/.initializing 2024-11-13T00:29:16,573 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:39361/user/jenkins/test-data/1471d553-8348-2caf-e00e-bdee45eeef84/MasterData/WALs/cb28580903fb,46591,1731457756359 2024-11-13T00:29:16,577 INFO [master/cb28580903fb:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=cb28580903fb%2C46591%2C1731457756359, suffix=, logDir=hdfs://localhost:39361/user/jenkins/test-data/1471d553-8348-2caf-e00e-bdee45eeef84/MasterData/WALs/cb28580903fb,46591,1731457756359, archiveDir=hdfs://localhost:39361/user/jenkins/test-data/1471d553-8348-2caf-e00e-bdee45eeef84/MasterData/oldWALs, maxLogs=10 2024-11-13T00:29:16,577 INFO [master/cb28580903fb:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor cb28580903fb%2C46591%2C1731457756359.1731457756577 2024-11-13T00:29:16,583 INFO [master/cb28580903fb:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/1471d553-8348-2caf-e00e-bdee45eeef84/MasterData/WALs/cb28580903fb,46591,1731457756359/cb28580903fb%2C46591%2C1731457756359.1731457756577 2024-11-13T00:29:16,588 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45975:45975),(127.0.0.1/127.0.0.1:40447:40447)] 2024-11-13T00:29:16,589 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-13T00:29:16,589 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T00:29:16,590 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-13T00:29:16,590 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-13T00:29:16,591 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-13T00:29:16,593 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-13T00:29:16,593 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T00:29:16,593 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T00:29:16,594 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-13T00:29:16,595 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-13T00:29:16,595 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T00:29:16,596 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T00:29:16,596 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-13T00:29:16,598 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-13T00:29:16,598 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T00:29:16,599 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T00:29:16,599 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-13T00:29:16,600 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-13T00:29:16,600 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T00:29:16,601 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T00:29:16,601 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-13T00:29:16,602 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39361/user/jenkins/test-data/1471d553-8348-2caf-e00e-bdee45eeef84/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-13T00:29:16,602 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39361/user/jenkins/test-data/1471d553-8348-2caf-e00e-bdee45eeef84/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-13T00:29:16,604 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-13T00:29:16,604 DEBUG [master/cb28580903fb:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-13T00:29:16,604 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-13T00:29:16,606 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-13T00:29:16,608 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39361/user/jenkins/test-data/1471d553-8348-2caf-e00e-bdee45eeef84/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-13T00:29:16,609 INFO [master/cb28580903fb:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=830438, jitterRate=0.05595669150352478}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-13T00:29:16,610 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731457756590Initializing all the Stores at 1731457756591 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731457756591Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731457756591Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731457756591Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731457756591Cleaning up temporary data from old regions at 1731457756604 (+13 ms)Region opened successfully at 1731457756610 (+6 ms) 2024-11-13T00:29:16,610 INFO [master/cb28580903fb:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-13T00:29:16,615 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@29b4a233, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=cb28580903fb/172.17.0.2:0 2024-11-13T00:29:16,616 INFO [master/cb28580903fb:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-13T00:29:16,616 INFO [master/cb28580903fb:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-13T00:29:16,616 INFO [master/cb28580903fb:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-13T00:29:16,616 INFO [master/cb28580903fb:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-13T00:29:16,617 INFO [master/cb28580903fb:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-13T00:29:16,617 INFO [master/cb28580903fb:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-13T00:29:16,617 INFO [master/cb28580903fb:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-13T00:29:16,621 INFO [master/cb28580903fb:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-13T00:29:16,622 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46591-0x1010eba97420000, quorum=127.0.0.1:50605, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-13T00:29:16,623 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-13T00:29:16,623 INFO [master/cb28580903fb:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-13T00:29:16,624 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46591-0x1010eba97420000, quorum=127.0.0.1:50605, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-13T00:29:16,625 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-13T00:29:16,626 INFO [master/cb28580903fb:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-13T00:29:16,627 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46591-0x1010eba97420000, quorum=127.0.0.1:50605, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-13T00:29:16,629 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-13T00:29:16,630 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46591-0x1010eba97420000, quorum=127.0.0.1:50605, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-13T00:29:16,631 DEBUG 
[master/cb28580903fb:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-13T00:29:16,633 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46591-0x1010eba97420000, quorum=127.0.0.1:50605, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-13T00:29:16,635 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-13T00:29:16,637 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43209-0x1010eba97420001, quorum=127.0.0.1:50605, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-13T00:29:16,637 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46591-0x1010eba97420000, quorum=127.0.0.1:50605, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-13T00:29:16,637 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43209-0x1010eba97420001, quorum=127.0.0.1:50605, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T00:29:16,637 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46591-0x1010eba97420000, quorum=127.0.0.1:50605, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T00:29:16,637 INFO [master/cb28580903fb:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=cb28580903fb,46591,1731457756359, sessionid=0x1010eba97420000, setting cluster-up flag (Was=false) 2024-11-13T00:29:16,641 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43209-0x1010eba97420001, quorum=127.0.0.1:50605, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T00:29:16,641 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46591-0x1010eba97420000, quorum=127.0.0.1:50605, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T00:29:16,650 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-13T00:29:16,651 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=cb28580903fb,46591,1731457756359 2024-11-13T00:29:16,655 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43209-0x1010eba97420001, quorum=127.0.0.1:50605, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T00:29:16,655 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46591-0x1010eba97420000, quorum=127.0.0.1:50605, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T00:29:16,661 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-13T00:29:16,662 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=cb28580903fb,46591,1731457756359 2024-11-13T00:29:16,663 INFO [master/cb28580903fb:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:39361/user/jenkins/test-data/1471d553-8348-2caf-e00e-bdee45eeef84/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-13T00:29:16,665 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-13T00:29:16,665 INFO [master/cb28580903fb:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-13T00:29:16,665 INFO [master/cb28580903fb:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-13T00:29:16,666 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: cb28580903fb,46591,1731457756359 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-13T00:29:16,667 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/cb28580903fb:0, corePoolSize=5, maxPoolSize=5 2024-11-13T00:29:16,667 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/cb28580903fb:0, corePoolSize=5, maxPoolSize=5 2024-11-13T00:29:16,667 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/cb28580903fb:0, corePoolSize=5, maxPoolSize=5 2024-11-13T00:29:16,667 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/cb28580903fb:0, corePoolSize=5, maxPoolSize=5 2024-11-13T00:29:16,667 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/cb28580903fb:0, corePoolSize=10, maxPoolSize=10 2024-11-13T00:29:16,667 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/cb28580903fb:0, corePoolSize=1, maxPoolSize=1 2024-11-13T00:29:16,667 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/cb28580903fb:0, corePoolSize=2, maxPoolSize=2 2024-11-13T00:29:16,667 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/cb28580903fb:0, corePoolSize=1, 
maxPoolSize=1 2024-11-13T00:29:16,668 INFO [master/cb28580903fb:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731457786668 2024-11-13T00:29:16,668 INFO [master/cb28580903fb:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-13T00:29:16,668 INFO [master/cb28580903fb:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-13T00:29:16,669 INFO [master/cb28580903fb:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-13T00:29:16,669 INFO [master/cb28580903fb:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-13T00:29:16,669 INFO [master/cb28580903fb:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-13T00:29:16,669 INFO [master/cb28580903fb:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-13T00:29:16,669 INFO [master/cb28580903fb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-13T00:29:16,669 INFO [master/cb28580903fb:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-13T00:29:16,669 INFO [master/cb28580903fb:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-13T00:29:16,669 INFO [master/cb28580903fb:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-13T00:29:16,669 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-13T00:29:16,670 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-13T00:29:16,670 INFO [master/cb28580903fb:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-13T00:29:16,670 INFO [master/cb28580903fb:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-13T00:29:16,670 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/cb28580903fb:0:becomeActiveMaster-HFileCleaner.large.0-1731457756670,5,FailOnTimeoutGroup] 2024-11-13T00:29:16,670 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/cb28580903fb:0:becomeActiveMaster-HFileCleaner.small.0-1731457756670,5,FailOnTimeoutGroup] 2024-11-13T00:29:16,670 INFO [master/cb28580903fb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-13T00:29:16,670 INFO [master/cb28580903fb:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. 
Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-13T00:29:16,670 INFO [master/cb28580903fb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-13T00:29:16,670 INFO [master/cb28580903fb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-13T00:29:16,671 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T00:29:16,671 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-13T00:29:16,678 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073741831_1007 (size=1321) 2024-11-13T00:29:16,679 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34995 is added to blk_1073741831_1007 (size=1321) 2024-11-13T00:29:16,680 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:39361/user/jenkins/test-data/1471d553-8348-2caf-e00e-bdee45eeef84/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-13T00:29:16,680 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:39361/user/jenkins/test-data/1471d553-8348-2caf-e00e-bdee45eeef84 2024-11-13T00:29:16,687 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073741832_1008 (size=32) 2024-11-13T00:29:16,687 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34995 is added to blk_1073741832_1008 (size=32) 2024-11-13T00:29:16,688 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T00:29:16,689 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-13T00:29:16,690 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-13T00:29:16,691 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T00:29:16,691 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T00:29:16,691 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, 
prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-13T00:29:16,693 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-13T00:29:16,693 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T00:29:16,693 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T00:29:16,693 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-13T00:29:16,695 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-13T00:29:16,695 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T00:29:16,695 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T00:29:16,695 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-13T00:29:16,697 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to 
compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-13T00:29:16,697 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T00:29:16,697 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T00:29:16,697 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-13T00:29:16,698 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39361/user/jenkins/test-data/1471d553-8348-2caf-e00e-bdee45eeef84/data/hbase/meta/1588230740 2024-11-13T00:29:16,698 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39361/user/jenkins/test-data/1471d553-8348-2caf-e00e-bdee45eeef84/data/hbase/meta/1588230740 2024-11-13T00:29:16,700 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-13T00:29:16,700 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-13T00:29:16,700 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-11-13T00:29:16,702 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-13T00:29:16,704 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39361/user/jenkins/test-data/1471d553-8348-2caf-e00e-bdee45eeef84/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-13T00:29:16,704 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=712544, jitterRate=-0.09395396709442139}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-13T00:29:16,706 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731457756688Initializing all the Stores at 1731457756689 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731457756689Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731457756689Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731457756689Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731457756689Cleaning up temporary data from old regions at 1731457756700 (+11 ms)Region opened successfully at 1731457756706 (+6 ms) 2024-11-13T00:29:16,706 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-13T00:29:16,706 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-13T00:29:16,706 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-13T00:29:16,706 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-13T00:29:16,706 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-13T00:29:16,706 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-13T00:29:16,706 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731457756706Disabling compacts and flushes for region at 1731457756706Disabling writes for close at 1731457756706Writing region 
close event to WAL at 1731457756706Closed at 1731457756706 2024-11-13T00:29:16,708 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-13T00:29:16,708 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-13T00:29:16,708 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-13T00:29:16,710 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-13T00:29:16,711 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-13T00:29:16,750 INFO [RS:0;cb28580903fb:43209 {}] regionserver.HRegionServer(746): ClusterId : 65f42912-91bf-4737-afc9-57bffb50a1d5 2024-11-13T00:29:16,750 DEBUG [RS:0;cb28580903fb:43209 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-13T00:29:16,753 DEBUG [RS:0;cb28580903fb:43209 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-13T00:29:16,753 DEBUG [RS:0;cb28580903fb:43209 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-13T00:29:16,755 DEBUG [RS:0;cb28580903fb:43209 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-13T00:29:16,756 DEBUG [RS:0;cb28580903fb:43209 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5c12a66, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=cb28580903fb/172.17.0.2:0 2024-11-13T00:29:16,769 DEBUG [RS:0;cb28580903fb:43209 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;cb28580903fb:43209 2024-11-13T00:29:16,769 INFO [RS:0;cb28580903fb:43209 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-13T00:29:16,769 INFO [RS:0;cb28580903fb:43209 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-13T00:29:16,769 DEBUG [RS:0;cb28580903fb:43209 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-13T00:29:16,769 INFO [RS:0;cb28580903fb:43209 {}] regionserver.HRegionServer(2659): reportForDuty to master=cb28580903fb,46591,1731457756359 with port=43209, startcode=1731457756414 2024-11-13T00:29:16,770 DEBUG [RS:0;cb28580903fb:43209 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-13T00:29:16,772 INFO [HMaster-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43569, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=RegionServerStatusService 2024-11-13T00:29:16,773 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46591 {}] master.ServerManager(363): Checking decommissioned status of RegionServer cb28580903fb,43209,1731457756414 2024-11-13T00:29:16,773 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46591 {}] master.ServerManager(517): Registering regionserver=cb28580903fb,43209,1731457756414 2024-11-13T00:29:16,776 DEBUG [RS:0;cb28580903fb:43209 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:39361/user/jenkins/test-data/1471d553-8348-2caf-e00e-bdee45eeef84 2024-11-13T00:29:16,776 DEBUG [RS:0;cb28580903fb:43209 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:39361 2024-11-13T00:29:16,776 DEBUG [RS:0;cb28580903fb:43209 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-13T00:29:16,778 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46591-0x1010eba97420000, quorum=127.0.0.1:50605, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-13T00:29:16,778 DEBUG [RS:0;cb28580903fb:43209 {}] zookeeper.ZKUtil(111): regionserver:43209-0x1010eba97420001, quorum=127.0.0.1:50605, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/cb28580903fb,43209,1731457756414 2024-11-13T00:29:16,779 WARN [RS:0;cb28580903fb:43209 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-13T00:29:16,779 INFO [RS:0;cb28580903fb:43209 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-13T00:29:16,779 DEBUG [RS:0;cb28580903fb:43209 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:39361/user/jenkins/test-data/1471d553-8348-2caf-e00e-bdee45eeef84/WALs/cb28580903fb,43209,1731457756414 2024-11-13T00:29:16,779 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [cb28580903fb,43209,1731457756414] 2024-11-13T00:29:16,784 INFO [RS:0;cb28580903fb:43209 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-13T00:29:16,786 INFO [RS:0;cb28580903fb:43209 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-13T00:29:16,787 INFO [RS:0;cb28580903fb:43209 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-13T00:29:16,787 INFO [RS:0;cb28580903fb:43209 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-11-13T00:29:16,787 INFO [RS:0;cb28580903fb:43209 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-13T00:29:16,788 INFO [RS:0;cb28580903fb:43209 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-13T00:29:16,788 INFO [RS:0;cb28580903fb:43209 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-13T00:29:16,788 DEBUG [RS:0;cb28580903fb:43209 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/cb28580903fb:0, corePoolSize=1, maxPoolSize=1 2024-11-13T00:29:16,788 DEBUG [RS:0;cb28580903fb:43209 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/cb28580903fb:0, corePoolSize=1, maxPoolSize=1 2024-11-13T00:29:16,788 DEBUG [RS:0;cb28580903fb:43209 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/cb28580903fb:0, corePoolSize=1, maxPoolSize=1 2024-11-13T00:29:16,788 DEBUG [RS:0;cb28580903fb:43209 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/cb28580903fb:0, corePoolSize=1, maxPoolSize=1 2024-11-13T00:29:16,788 DEBUG [RS:0;cb28580903fb:43209 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/cb28580903fb:0, corePoolSize=1, maxPoolSize=1 2024-11-13T00:29:16,788 DEBUG [RS:0;cb28580903fb:43209 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/cb28580903fb:0, corePoolSize=2, maxPoolSize=2 2024-11-13T00:29:16,788 DEBUG [RS:0;cb28580903fb:43209 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/cb28580903fb:0, corePoolSize=1, maxPoolSize=1 2024-11-13T00:29:16,788 DEBUG [RS:0;cb28580903fb:43209 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/cb28580903fb:0, corePoolSize=1, maxPoolSize=1 2024-11-13T00:29:16,788 DEBUG [RS:0;cb28580903fb:43209 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/cb28580903fb:0, corePoolSize=1, maxPoolSize=1 2024-11-13T00:29:16,788 DEBUG [RS:0;cb28580903fb:43209 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/cb28580903fb:0, corePoolSize=1, maxPoolSize=1 2024-11-13T00:29:16,789 DEBUG [RS:0;cb28580903fb:43209 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/cb28580903fb:0, corePoolSize=1, maxPoolSize=1 2024-11-13T00:29:16,789 DEBUG [RS:0;cb28580903fb:43209 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/cb28580903fb:0, corePoolSize=1, maxPoolSize=1 2024-11-13T00:29:16,789 DEBUG [RS:0;cb28580903fb:43209 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/cb28580903fb:0, corePoolSize=3, maxPoolSize=3 2024-11-13T00:29:16,789 DEBUG [RS:0;cb28580903fb:43209 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/cb28580903fb:0, corePoolSize=3, maxPoolSize=3 2024-11-13T00:29:16,789 INFO [RS:0;cb28580903fb:43209 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-11-13T00:29:16,789 INFO [RS:0;cb28580903fb:43209 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-13T00:29:16,789 INFO [RS:0;cb28580903fb:43209 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-13T00:29:16,789 INFO [RS:0;cb28580903fb:43209 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-13T00:29:16,789 INFO [RS:0;cb28580903fb:43209 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-13T00:29:16,789 INFO [RS:0;cb28580903fb:43209 {}] hbase.ChoreService(168): Chore ScheduledChore name=cb28580903fb,43209,1731457756414-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-13T00:29:16,805 INFO [RS:0;cb28580903fb:43209 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-13T00:29:16,805 INFO [RS:0;cb28580903fb:43209 {}] hbase.ChoreService(168): Chore ScheduledChore name=cb28580903fb,43209,1731457756414-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-13T00:29:16,805 INFO [RS:0;cb28580903fb:43209 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-13T00:29:16,805 INFO [RS:0;cb28580903fb:43209 {}] regionserver.Replication(171): cb28580903fb,43209,1731457756414 started 2024-11-13T00:29:16,821 INFO [RS:0;cb28580903fb:43209 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-13T00:29:16,821 INFO [RS:0;cb28580903fb:43209 {}] regionserver.HRegionServer(1482): Serving as cb28580903fb,43209,1731457756414, RpcServer on cb28580903fb/172.17.0.2:43209, sessionid=0x1010eba97420001 2024-11-13T00:29:16,821 DEBUG [RS:0;cb28580903fb:43209 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-13T00:29:16,821 DEBUG [RS:0;cb28580903fb:43209 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager cb28580903fb,43209,1731457756414 2024-11-13T00:29:16,821 DEBUG [RS:0;cb28580903fb:43209 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'cb28580903fb,43209,1731457756414' 2024-11-13T00:29:16,821 DEBUG [RS:0;cb28580903fb:43209 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-13T00:29:16,822 DEBUG [RS:0;cb28580903fb:43209 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-13T00:29:16,823 DEBUG [RS:0;cb28580903fb:43209 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-13T00:29:16,823 DEBUG [RS:0;cb28580903fb:43209 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-13T00:29:16,823 DEBUG [RS:0;cb28580903fb:43209 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager cb28580903fb,43209,1731457756414 2024-11-13T00:29:16,823 DEBUG [RS:0;cb28580903fb:43209 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'cb28580903fb,43209,1731457756414' 2024-11-13T00:29:16,823 DEBUG [RS:0;cb28580903fb:43209 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-13T00:29:16,823 DEBUG 
[RS:0;cb28580903fb:43209 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-13T00:29:16,824 DEBUG [RS:0;cb28580903fb:43209 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-13T00:29:16,824 INFO [RS:0;cb28580903fb:43209 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-13T00:29:16,824 INFO [RS:0;cb28580903fb:43209 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-13T00:29:16,862 WARN [cb28580903fb:46591 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 2024-11-13T00:29:16,927 INFO [RS:0;cb28580903fb:43209 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=cb28580903fb%2C43209%2C1731457756414, suffix=, logDir=hdfs://localhost:39361/user/jenkins/test-data/1471d553-8348-2caf-e00e-bdee45eeef84/WALs/cb28580903fb,43209,1731457756414, archiveDir=hdfs://localhost:39361/user/jenkins/test-data/1471d553-8348-2caf-e00e-bdee45eeef84/oldWALs, maxLogs=32 2024-11-13T00:29:16,929 INFO [RS:0;cb28580903fb:43209 {}] monitor.StreamSlowMonitor(122): New stream slow monitor cb28580903fb%2C43209%2C1731457756414.1731457756928 2024-11-13T00:29:16,936 INFO [RS:0;cb28580903fb:43209 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/1471d553-8348-2caf-e00e-bdee45eeef84/WALs/cb28580903fb,43209,1731457756414/cb28580903fb%2C43209%2C1731457756414.1731457756928 2024-11-13T00:29:16,938 DEBUG [RS:0;cb28580903fb:43209 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45975:45975),(127.0.0.1/127.0.0.1:40447:40447)] 2024-11-13T00:29:17,112 DEBUG [cb28580903fb:46591 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-13T00:29:17,113 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=cb28580903fb,43209,1731457756414 2024-11-13T00:29:17,115 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as cb28580903fb,43209,1731457756414, state=OPENING 2024-11-13T00:29:17,117 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-13T00:29:17,119 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43209-0x1010eba97420001, quorum=127.0.0.1:50605, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T00:29:17,119 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46591-0x1010eba97420000, quorum=127.0.0.1:50605, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T00:29:17,120 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-13T00:29:17,120 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-13T00:29:17,120 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, 
server=cb28580903fb,43209,1731457756414}] 2024-11-13T00:29:17,122 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-13T00:29:17,275 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-13T00:29:17,278 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50861, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-13T00:29:17,283 INFO [RS_OPEN_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-13T00:29:17,284 INFO [RS_OPEN_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-13T00:29:17,286 INFO [RS_OPEN_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=cb28580903fb%2C43209%2C1731457756414.meta, suffix=.meta, logDir=hdfs://localhost:39361/user/jenkins/test-data/1471d553-8348-2caf-e00e-bdee45eeef84/WALs/cb28580903fb,43209,1731457756414, archiveDir=hdfs://localhost:39361/user/jenkins/test-data/1471d553-8348-2caf-e00e-bdee45eeef84/oldWALs, maxLogs=32 2024-11-13T00:29:17,289 INFO [RS_OPEN_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor cb28580903fb%2C43209%2C1731457756414.meta.1731457757289.meta 2024-11-13T00:29:17,297 INFO [RS_OPEN_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/1471d553-8348-2caf-e00e-bdee45eeef84/WALs/cb28580903fb,43209,1731457756414/cb28580903fb%2C43209%2C1731457756414.meta.1731457757289.meta 2024-11-13T00:29:17,298 DEBUG [RS_OPEN_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45975:45975),(127.0.0.1/127.0.0.1:40447:40447)] 2024-11-13T00:29:17,298 DEBUG [RS_OPEN_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-13T00:29:17,299 DEBUG [RS_OPEN_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-13T00:29:17,299 DEBUG [RS_OPEN_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-13T00:29:17,299 INFO [RS_OPEN_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-11-13T00:29:17,299 DEBUG [RS_OPEN_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-13T00:29:17,299 DEBUG [RS_OPEN_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T00:29:17,299 DEBUG [RS_OPEN_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-13T00:29:17,300 DEBUG [RS_OPEN_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-13T00:29:17,301 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-13T00:29:17,303 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-13T00:29:17,303 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T00:29:17,304 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T00:29:17,304 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-13T00:29:17,305 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-13T00:29:17,305 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T00:29:17,306 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T00:29:17,306 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-13T00:29:17,307 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-13T00:29:17,307 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T00:29:17,308 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T00:29:17,308 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-13T00:29:17,309 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-13T00:29:17,310 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T00:29:17,310 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-11-13T00:29:17,310 DEBUG [RS_OPEN_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-13T00:29:17,312 DEBUG [RS_OPEN_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39361/user/jenkins/test-data/1471d553-8348-2caf-e00e-bdee45eeef84/data/hbase/meta/1588230740 2024-11-13T00:29:17,314 DEBUG [RS_OPEN_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39361/user/jenkins/test-data/1471d553-8348-2caf-e00e-bdee45eeef84/data/hbase/meta/1588230740 2024-11-13T00:29:17,315 DEBUG [RS_OPEN_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-13T00:29:17,315 DEBUG [RS_OPEN_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-13T00:29:17,316 DEBUG [RS_OPEN_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-13T00:29:17,318 DEBUG [RS_OPEN_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-13T00:29:17,320 INFO [RS_OPEN_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=809091, jitterRate=0.02881339192390442}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-13T00:29:17,320 DEBUG [RS_OPEN_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-13T00:29:17,321 DEBUG [RS_OPEN_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731457757300Writing region info on filesystem at 1731457757300Initializing all the Stores at 1731457757301 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731457757301Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731457757301Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 
'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731457757301Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731457757301Cleaning up temporary data from old regions at 1731457757315 (+14 ms)Running coprocessor post-open hooks at 1731457757320 (+5 ms)Region opened successfully at 1731457757321 (+1 ms) 2024-11-13T00:29:17,323 INFO [RS_OPEN_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731457757275 2024-11-13T00:29:17,326 DEBUG [RS_OPEN_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-13T00:29:17,326 INFO [RS_OPEN_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-13T00:29:17,328 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=cb28580903fb,43209,1731457756414 2024-11-13T00:29:17,330 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as cb28580903fb,43209,1731457756414, state=OPEN 2024-11-13T00:29:17,338 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46591-0x1010eba97420000, quorum=127.0.0.1:50605, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-13T00:29:17,338 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43209-0x1010eba97420001, quorum=127.0.0.1:50605, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-13T00:29:17,338 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=cb28580903fb,43209,1731457756414 2024-11-13T00:29:17,338 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-13T00:29:17,338 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-13T00:29:17,342 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-13T00:29:17,342 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=cb28580903fb,43209,1731457756414 in 218 msec 2024-11-13T00:29:17,347 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-13T00:29:17,347 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 634 msec 2024-11-13T00:29:17,348 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute 
pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-13T00:29:17,348 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-13T00:29:17,350 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-13T00:29:17,350 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=cb28580903fb,43209,1731457756414, seqNum=-1] 2024-11-13T00:29:17,351 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-13T00:29:17,353 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55715, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-13T00:29:17,366 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 697 msec 2024-11-13T00:29:17,366 INFO [master/cb28580903fb:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731457757366, completionTime=-1 2024-11-13T00:29:17,366 INFO [master/cb28580903fb:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-13T00:29:17,366 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-13T00:29:17,369 INFO [master/cb28580903fb:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-13T00:29:17,369 INFO [master/cb28580903fb:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731457817369 2024-11-13T00:29:17,369 INFO [master/cb28580903fb:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731457877369 2024-11-13T00:29:17,369 INFO [master/cb28580903fb:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 2 msec 2024-11-13T00:29:17,370 INFO [master/cb28580903fb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=cb28580903fb,46591,1731457756359-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-13T00:29:17,370 INFO [master/cb28580903fb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=cb28580903fb,46591,1731457756359-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-13T00:29:17,370 INFO [master/cb28580903fb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=cb28580903fb,46591,1731457756359-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-13T00:29:17,370 INFO [master/cb28580903fb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-cb28580903fb:46591, period=300000, unit=MILLISECONDS is enabled. 
2024-11-13T00:29:17,370 INFO [master/cb28580903fb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-13T00:29:17,372 DEBUG [master/cb28580903fb:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-13T00:29:17,376 INFO [master/cb28580903fb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-13T00:29:17,381 INFO [master/cb28580903fb:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.897sec 2024-11-13T00:29:17,382 INFO [master/cb28580903fb:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-13T00:29:17,382 INFO [master/cb28580903fb:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-13T00:29:17,382 INFO [master/cb28580903fb:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-13T00:29:17,382 INFO [master/cb28580903fb:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-13T00:29:17,382 INFO [master/cb28580903fb:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-13T00:29:17,382 INFO [master/cb28580903fb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=cb28580903fb,46591,1731457756359-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-13T00:29:17,382 INFO [master/cb28580903fb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=cb28580903fb,46591,1731457756359-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-13T00:29:17,385 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-13T00:29:17,385 INFO [master/cb28580903fb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-13T00:29:17,385 INFO [master/cb28580903fb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=cb28580903fb,46591,1731457756359-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
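[Editor's note] The "Chore ScheduledChore name=..., period=..., unit=..." lines above record chores being handed to the master's ChoreService. A minimal sketch of what such a chore looks like in code, with the name and period chosen arbitrarily for illustration:

    import org.apache.hadoop.hbase.ScheduledChore;
    import org.apache.hadoop.hbase.Stoppable;

    final class ChoreSketch {
      // A ScheduledChore runs chore() every `period` milliseconds once it is
      // scheduled on a ChoreService, which is what produces the log lines above.
      static ScheduledChore exampleChore(Stoppable stopper) {
        return new ScheduledChore("example-chore", stopper, 60_000) {
          @Override
          protected void chore() {
            // periodic work would go here
          }
        };
      }
    }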
2024-11-13T00:29:17,451 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6544715e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T00:29:17,451 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request cb28580903fb,46591,-1 for getting cluster id 2024-11-13T00:29:17,451 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-13T00:29:17,453 DEBUG [HMaster-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '65f42912-91bf-4737-afc9-57bffb50a1d5' 2024-11-13T00:29:17,454 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-13T00:29:17,455 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "65f42912-91bf-4737-afc9-57bffb50a1d5" 2024-11-13T00:29:17,455 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@367dc70, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T00:29:17,455 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [cb28580903fb,46591,-1] 2024-11-13T00:29:17,456 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-13T00:29:17,457 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T00:29:17,460 INFO [HMaster-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52522, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-13T00:29:17,461 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@a2acd0f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T00:29:17,461 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-13T00:29:17,463 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=cb28580903fb,43209,1731457756414, seqNum=-1] 2024-11-13T00:29:17,463 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-13T00:29:17,465 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55170, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-13T00:29:17,468 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=cb28580903fb,46591,1731457756359 2024-11-13T00:29:17,469 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T00:29:17,473 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-13T00:29:17,473 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-13T00:29:17,473 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-13T00:29:17,474 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at org.apache.hadoop.hbase.regionserver.wal.TestLogRolling.testLogRollOnDatanodeDeath(TestLogRolling.java:201) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at 
org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-13T00:29:17,474 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T00:29:17,474 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T00:29:17,474 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-13T00:29:17,474 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-13T00:29:17,474 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1712408714, stopped=false 2024-11-13T00:29:17,474 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=cb28580903fb,46591,1731457756359 2024-11-13T00:29:17,476 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46591-0x1010eba97420000, quorum=127.0.0.1:50605, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-13T00:29:17,476 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43209-0x1010eba97420001, quorum=127.0.0.1:50605, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-13T00:29:17,476 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46591-0x1010eba97420000, quorum=127.0.0.1:50605, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T00:29:17,476 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-13T00:29:17,476 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43209-0x1010eba97420001, quorum=127.0.0.1:50605, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T00:29:17,477 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
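[Editor's note] The call stack above shows where the shutdown originates: the JUnit @After method AbstractTestLogRolling.tearDown calling HBaseTestingUtil.shutdownMiniCluster. A minimal sketch of that teardown shape, with the field name and surrounding class invented for illustration:

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.junit.After;

    public abstract class LogRollingTeardownSketch {
      // Assumed to be created and started in the test's setup; only the teardown
      // path that the stack trace above walks through is sketched here.
      protected static HBaseTestingUtil TEST_UTIL;

      @After
      public void tearDown() throws Exception {
        // Closes the shared async connection, stops master and region servers,
        // then tears down DFS and ZooKeeper ("Shutting down minicluster" above).
        TEST_UTIL.shutdownMiniCluster();
      }
    }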
2024-11-13T00:29:17,477 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at org.apache.hadoop.hbase.regionserver.wal.TestLogRolling.testLogRollOnDatanodeDeath(TestLogRolling.java:201) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at 
java.base/java.lang.Thread.run(Thread.java:840) 2024-11-13T00:29:17,477 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T00:29:17,477 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'cb28580903fb,43209,1731457756414' ***** 2024-11-13T00:29:17,477 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-13T00:29:17,477 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:43209-0x1010eba97420001, quorum=127.0.0.1:50605, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-13T00:29:17,478 INFO [RS:0;cb28580903fb:43209 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-13T00:29:17,478 INFO [RS:0;cb28580903fb:43209 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-13T00:29:17,478 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-13T00:29:17,478 INFO [RS:0;cb28580903fb:43209 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-13T00:29:17,478 INFO [RS:0;cb28580903fb:43209 {}] regionserver.HRegionServer(959): stopping server cb28580903fb,43209,1731457756414 2024-11-13T00:29:17,478 INFO [RS:0;cb28580903fb:43209 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-13T00:29:17,478 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:46591-0x1010eba97420000, quorum=127.0.0.1:50605, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-13T00:29:17,478 INFO [RS:0;cb28580903fb:43209 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;cb28580903fb:43209. 2024-11-13T00:29:17,478 DEBUG [RS:0;cb28580903fb:43209 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-13T00:29:17,478 DEBUG [RS:0;cb28580903fb:43209 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T00:29:17,478 INFO [RS:0;cb28580903fb:43209 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 
2024-11-13T00:29:17,478 INFO [RS:0;cb28580903fb:43209 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-13T00:29:17,479 INFO [RS:0;cb28580903fb:43209 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-13T00:29:17,479 INFO [RS:0;cb28580903fb:43209 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-13T00:29:17,479 INFO [RS:0;cb28580903fb:43209 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-13T00:29:17,479 DEBUG [RS:0;cb28580903fb:43209 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-11-13T00:29:17,479 DEBUG [RS:0;cb28580903fb:43209 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-13T00:29:17,479 DEBUG [RS_CLOSE_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-13T00:29:17,480 INFO [RS_CLOSE_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-13T00:29:17,480 DEBUG [RS_CLOSE_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-13T00:29:17,480 DEBUG [RS_CLOSE_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-13T00:29:17,480 DEBUG [RS_CLOSE_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-13T00:29:17,480 INFO [RS_CLOSE_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=74 B heapSize=1.22 KB 2024-11-13T00:29:17,502 DEBUG [RS_CLOSE_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39361/user/jenkins/test-data/1471d553-8348-2caf-e00e-bdee45eeef84/data/hbase/meta/1588230740/.tmp/ns/a45b7046f3ed4d35902508be4662200b is 43, key is default/ns:d/1731457757354/Put/seqid=0 2024-11-13T00:29:17,508 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073741835_1011 (size=5153) 2024-11-13T00:29:17,509 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34995 is added to blk_1073741835_1011 (size=5153) 2024-11-13T00:29:17,509 INFO [RS_CLOSE_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:39361/user/jenkins/test-data/1471d553-8348-2caf-e00e-bdee45eeef84/data/hbase/meta/1588230740/.tmp/ns/a45b7046f3ed4d35902508be4662200b 2024-11-13T00:29:17,518 DEBUG [RS_CLOSE_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39361/user/jenkins/test-data/1471d553-8348-2caf-e00e-bdee45eeef84/data/hbase/meta/1588230740/.tmp/ns/a45b7046f3ed4d35902508be4662200b as hdfs://localhost:39361/user/jenkins/test-data/1471d553-8348-2caf-e00e-bdee45eeef84/data/hbase/meta/1588230740/ns/a45b7046f3ed4d35902508be4662200b 2024-11-13T00:29:17,527 INFO [RS_CLOSE_META-regionserver/cb28580903fb:0-0 
{event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39361/user/jenkins/test-data/1471d553-8348-2caf-e00e-bdee45eeef84/data/hbase/meta/1588230740/ns/a45b7046f3ed4d35902508be4662200b, entries=2, sequenceid=6, filesize=5.0 K 2024-11-13T00:29:17,529 INFO [RS_CLOSE_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 49ms, sequenceid=6, compaction requested=false 2024-11-13T00:29:17,529 DEBUG [RS_CLOSE_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-13T00:29:17,536 DEBUG [RS_CLOSE_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39361/user/jenkins/test-data/1471d553-8348-2caf-e00e-bdee45eeef84/data/hbase/meta/1588230740/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-13T00:29:17,537 DEBUG [RS_CLOSE_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-13T00:29:17,537 INFO [RS_CLOSE_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-13T00:29:17,537 DEBUG [RS_CLOSE_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731457757479Running coprocessor pre-close hooks at 1731457757479Disabling compacts and flushes for region at 1731457757479Disabling writes for close at 1731457757480 (+1 ms)Obtaining lock to block concurrent updates at 1731457757480Preparing flush snapshotting stores in 1588230740 at 1731457757480Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=74, getHeapSize=1184, getOffHeapSize=0, getCellsCount=2 at 1731457757480Flushing stores of hbase:meta,,1.1588230740 at 1731457757481 (+1 ms)Flushing 1588230740/ns: creating writer at 1731457757482 (+1 ms)Flushing 1588230740/ns: appending metadata at 1731457757502 (+20 ms)Flushing 1588230740/ns: closing flushed file at 1731457757502Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3b40ade6: reopening flushed file at 1731457757517 (+15 ms)Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 49ms, sequenceid=6, compaction requested=false at 1731457757529 (+12 ms)Writing region close event to WAL at 1731457757531 (+2 ms)Running coprocessor post-close hooks at 1731457757537 (+6 ms)Closed at 1731457757537 2024-11-13T00:29:17,538 DEBUG [RS_CLOSE_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-13T00:29:17,680 INFO [RS:0;cb28580903fb:43209 {}] regionserver.HRegionServer(976): stopping server cb28580903fb,43209,1731457756414; all regions closed. 
2024-11-13T00:29:17,680 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:29:17,681 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:29:17,681 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:29:17,681 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:29:17,681 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:29:17,686 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34995 is added to blk_1073741834_1010 (size=1152) 2024-11-13T00:29:17,687 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073741834_1010 (size=1152) 2024-11-13T00:29:17,691 DEBUG [RS:0;cb28580903fb:43209 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/1471d553-8348-2caf-e00e-bdee45eeef84/oldWALs 2024-11-13T00:29:17,691 INFO [RS:0;cb28580903fb:43209 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog cb28580903fb%2C43209%2C1731457756414.meta:.meta(num 1731457757289) 2024-11-13T00:29:17,691 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:29:17,691 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:29:17,692 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:29:17,692 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:29:17,692 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:29:17,699 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34995 is added to blk_1073741833_1009 (size=93) 2024-11-13T00:29:17,700 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073741833_1009 (size=93) 2024-11-13T00:29:17,708 DEBUG [RS:0;cb28580903fb:43209 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/1471d553-8348-2caf-e00e-bdee45eeef84/oldWALs 2024-11-13T00:29:17,708 INFO [RS:0;cb28580903fb:43209 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog cb28580903fb%2C43209%2C1731457756414:(num 1731457756928) 2024-11-13T00:29:17,708 DEBUG [RS:0;cb28580903fb:43209 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T00:29:17,708 INFO [RS:0;cb28580903fb:43209 {}] regionserver.LeaseManager(133): Closed leases 2024-11-13T00:29:17,709 INFO [RS:0;cb28580903fb:43209 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-13T00:29:17,709 INFO [RS:0;cb28580903fb:43209 {}] hbase.ChoreService(370): Chore service for: regionserver/cb28580903fb:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-13T00:29:17,709 INFO [RS:0;cb28580903fb:43209 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-13T00:29:17,709 INFO [regionserver/cb28580903fb:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
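[Editor's note] The "Moved 1 WAL file(s) to .../oldWALs" and "Closed WAL: FSHLog ..." entries above are the end state a log-rolling test drives toward: once a WAL is rolled and no longer needed, it is archived under oldWALs. A test can request a roll explicitly through the Admin API; a minimal sketch (the helper class is illustrative, not code from the test above):

    import java.io.IOException;
    import org.apache.hadoop.hbase.ServerName;
    import org.apache.hadoop.hbase.client.Admin;

    final class WalRollSketch {
      // Asks the given region server to close its current WAL and open a new one;
      // the closed file later moves to the oldWALs directory, as logged above.
      static void rollWal(Admin admin, ServerName server) throws IOException {
        admin.rollWALWriter(server);
      }
    }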
2024-11-13T00:29:17,709 INFO [RS:0;cb28580903fb:43209 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:43209 2024-11-13T00:29:17,712 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43209-0x1010eba97420001, quorum=127.0.0.1:50605, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/cb28580903fb,43209,1731457756414 2024-11-13T00:29:17,713 INFO [RS:0;cb28580903fb:43209 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-13T00:29:17,713 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46591-0x1010eba97420000, quorum=127.0.0.1:50605, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-13T00:29:17,715 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [cb28580903fb,43209,1731457756414] 2024-11-13T00:29:17,717 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/cb28580903fb,43209,1731457756414 already deleted, retry=false 2024-11-13T00:29:17,717 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; cb28580903fb,43209,1731457756414 expired; onlineServers=0 2024-11-13T00:29:17,717 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'cb28580903fb,46591,1731457756359' ***** 2024-11-13T00:29:17,717 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-13T00:29:17,717 INFO [M:0;cb28580903fb:46591 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-13T00:29:17,718 INFO [M:0;cb28580903fb:46591 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-13T00:29:17,718 DEBUG [M:0;cb28580903fb:46591 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-13T00:29:17,718 DEBUG [M:0;cb28580903fb:46591 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-13T00:29:17,718 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-13T00:29:17,718 DEBUG [master/cb28580903fb:0:becomeActiveMaster-HFileCleaner.large.0-1731457756670 {}] cleaner.HFileCleaner(306): Exit Thread[master/cb28580903fb:0:becomeActiveMaster-HFileCleaner.large.0-1731457756670,5,FailOnTimeoutGroup] 2024-11-13T00:29:17,718 DEBUG [master/cb28580903fb:0:becomeActiveMaster-HFileCleaner.small.0-1731457756670 {}] cleaner.HFileCleaner(306): Exit Thread[master/cb28580903fb:0:becomeActiveMaster-HFileCleaner.small.0-1731457756670,5,FailOnTimeoutGroup] 2024-11-13T00:29:17,718 INFO [M:0;cb28580903fb:46591 {}] hbase.ChoreService(370): Chore service for: master/cb28580903fb:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-13T00:29:17,718 INFO [M:0;cb28580903fb:46591 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-13T00:29:17,718 DEBUG [M:0;cb28580903fb:46591 {}] master.HMaster(1795): Stopping service threads 2024-11-13T00:29:17,718 INFO [M:0;cb28580903fb:46591 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-13T00:29:17,718 INFO [M:0;cb28580903fb:46591 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-13T00:29:17,719 INFO [M:0;cb28580903fb:46591 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-13T00:29:17,719 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-13T00:29:17,720 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46591-0x1010eba97420000, quorum=127.0.0.1:50605, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-13T00:29:17,721 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46591-0x1010eba97420000, quorum=127.0.0.1:50605, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T00:29:17,721 DEBUG [M:0;cb28580903fb:46591 {}] zookeeper.ZKUtil(347): master:46591-0x1010eba97420000, quorum=127.0.0.1:50605, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-13T00:29:17,721 WARN [M:0;cb28580903fb:46591 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-13T00:29:17,722 INFO [M:0;cb28580903fb:46591 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:39361/user/jenkins/test-data/1471d553-8348-2caf-e00e-bdee45eeef84/.lastflushedseqids 2024-11-13T00:29:17,741 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073741836_1012 (size=99) 2024-11-13T00:29:17,741 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34995 is added to blk_1073741836_1012 (size=99) 2024-11-13T00:29:17,742 INFO [M:0;cb28580903fb:46591 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-13T00:29:17,743 INFO [M:0;cb28580903fb:46591 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-13T00:29:17,743 DEBUG [M:0;cb28580903fb:46591 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-13T00:29:17,743 INFO [M:0;cb28580903fb:46591 {}] 
regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-13T00:29:17,744 DEBUG [M:0;cb28580903fb:46591 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-13T00:29:17,744 DEBUG [M:0;cb28580903fb:46591 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-13T00:29:17,744 DEBUG [M:0;cb28580903fb:46591 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-13T00:29:17,744 INFO [M:0;cb28580903fb:46591 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=7.67 KB heapSize=11.34 KB 2024-11-13T00:29:17,774 DEBUG [M:0;cb28580903fb:46591 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39361/user/jenkins/test-data/1471d553-8348-2caf-e00e-bdee45eeef84/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/6794e44cc9b04be5a272c02519cdfa45 is 82, key is hbase:meta,,1/info:regioninfo/1731457757328/Put/seqid=0 2024-11-13T00:29:17,782 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073741837_1013 (size=5672) 2024-11-13T00:29:17,783 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34995 is added to blk_1073741837_1013 (size=5672) 2024-11-13T00:29:17,784 INFO [M:0;cb28580903fb:46591 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:39361/user/jenkins/test-data/1471d553-8348-2caf-e00e-bdee45eeef84/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/6794e44cc9b04be5a272c02519cdfa45 2024-11-13T00:29:17,812 DEBUG [M:0;cb28580903fb:46591 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39361/user/jenkins/test-data/1471d553-8348-2caf-e00e-bdee45eeef84/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/a8303c3b9f824ea09dee1f612cfeafc9 is 240, key is \x00\x00\x00\x00\x00\x00\x00\x01/proc:d/1731457757365/Put/seqid=0 2024-11-13T00:29:17,815 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43209-0x1010eba97420001, quorum=127.0.0.1:50605, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-13T00:29:17,815 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43209-0x1010eba97420001, quorum=127.0.0.1:50605, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-13T00:29:17,815 INFO [RS:0;cb28580903fb:43209 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-13T00:29:17,815 INFO [RS:0;cb28580903fb:43209 {}] regionserver.HRegionServer(1031): Exiting; stopping=cb28580903fb,43209,1731457756414; zookeeper connection closed. 
2024-11-13T00:29:17,815 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@2bfc94dc {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@2bfc94dc 2024-11-13T00:29:17,816 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-13T00:29:17,823 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073741838_1014 (size=5275) 2024-11-13T00:29:17,825 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34995 is added to blk_1073741838_1014 (size=5275) 2024-11-13T00:29:17,826 INFO [M:0;cb28580903fb:46591 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.06 KB at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:39361/user/jenkins/test-data/1471d553-8348-2caf-e00e-bdee45eeef84/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/a8303c3b9f824ea09dee1f612cfeafc9 2024-11-13T00:29:17,863 DEBUG [M:0;cb28580903fb:46591 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39361/user/jenkins/test-data/1471d553-8348-2caf-e00e-bdee45eeef84/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/6439202c8cb64c88b85fa435d6d432ae is 69, key is cb28580903fb,43209,1731457756414/rs:state/1731457756774/Put/seqid=0 2024-11-13T00:29:17,871 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073741839_1015 (size=5156) 2024-11-13T00:29:17,871 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34995 is added to blk_1073741839_1015 (size=5156) 2024-11-13T00:29:17,872 INFO [M:0;cb28580903fb:46591 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:39361/user/jenkins/test-data/1471d553-8348-2caf-e00e-bdee45eeef84/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/6439202c8cb64c88b85fa435d6d432ae 2024-11-13T00:29:17,903 DEBUG [M:0;cb28580903fb:46591 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39361/user/jenkins/test-data/1471d553-8348-2caf-e00e-bdee45eeef84/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/78c7d5125f2448178e9810ab32756975 is 52, key is load_balancer_on/state:d/1731457757471/Put/seqid=0 2024-11-13T00:29:17,910 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34995 is added to blk_1073741840_1016 (size=5056) 2024-11-13T00:29:17,911 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073741840_1016 (size=5056) 2024-11-13T00:29:17,913 INFO [M:0;cb28580903fb:46591 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:39361/user/jenkins/test-data/1471d553-8348-2caf-e00e-bdee45eeef84/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/78c7d5125f2448178e9810ab32756975 2024-11-13T00:29:17,922 DEBUG [M:0;cb28580903fb:46591 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39361/user/jenkins/test-data/1471d553-8348-2caf-e00e-bdee45eeef84/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/6794e44cc9b04be5a272c02519cdfa45 as 
hdfs://localhost:39361/user/jenkins/test-data/1471d553-8348-2caf-e00e-bdee45eeef84/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/6794e44cc9b04be5a272c02519cdfa45 2024-11-13T00:29:17,931 INFO [M:0;cb28580903fb:46591 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39361/user/jenkins/test-data/1471d553-8348-2caf-e00e-bdee45eeef84/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/6794e44cc9b04be5a272c02519cdfa45, entries=8, sequenceid=29, filesize=5.5 K 2024-11-13T00:29:17,932 DEBUG [M:0;cb28580903fb:46591 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39361/user/jenkins/test-data/1471d553-8348-2caf-e00e-bdee45eeef84/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/a8303c3b9f824ea09dee1f612cfeafc9 as hdfs://localhost:39361/user/jenkins/test-data/1471d553-8348-2caf-e00e-bdee45eeef84/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/a8303c3b9f824ea09dee1f612cfeafc9 2024-11-13T00:29:17,940 INFO [M:0;cb28580903fb:46591 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39361/user/jenkins/test-data/1471d553-8348-2caf-e00e-bdee45eeef84/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/a8303c3b9f824ea09dee1f612cfeafc9, entries=3, sequenceid=29, filesize=5.2 K 2024-11-13T00:29:17,941 DEBUG [M:0;cb28580903fb:46591 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39361/user/jenkins/test-data/1471d553-8348-2caf-e00e-bdee45eeef84/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/6439202c8cb64c88b85fa435d6d432ae as hdfs://localhost:39361/user/jenkins/test-data/1471d553-8348-2caf-e00e-bdee45eeef84/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/6439202c8cb64c88b85fa435d6d432ae 2024-11-13T00:29:17,951 INFO [M:0;cb28580903fb:46591 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39361/user/jenkins/test-data/1471d553-8348-2caf-e00e-bdee45eeef84/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/6439202c8cb64c88b85fa435d6d432ae, entries=1, sequenceid=29, filesize=5.0 K 2024-11-13T00:29:17,953 DEBUG [M:0;cb28580903fb:46591 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39361/user/jenkins/test-data/1471d553-8348-2caf-e00e-bdee45eeef84/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/78c7d5125f2448178e9810ab32756975 as hdfs://localhost:39361/user/jenkins/test-data/1471d553-8348-2caf-e00e-bdee45eeef84/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/78c7d5125f2448178e9810ab32756975 2024-11-13T00:29:17,960 INFO [M:0;cb28580903fb:46591 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39361/user/jenkins/test-data/1471d553-8348-2caf-e00e-bdee45eeef84/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/78c7d5125f2448178e9810ab32756975, entries=1, sequenceid=29, filesize=4.9 K 2024-11-13T00:29:17,962 INFO [M:0;cb28580903fb:46591 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 218ms, sequenceid=29, compaction requested=false 2024-11-13T00:29:17,996 INFO [M:0;cb28580903fb:46591 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-13T00:29:17,996 DEBUG [M:0;cb28580903fb:46591 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731457757743Disabling compacts and flushes for region at 1731457757743Disabling writes for close at 1731457757744 (+1 ms)Obtaining lock to block concurrent updates at 1731457757744Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731457757744Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=7850, getHeapSize=11544, getOffHeapSize=0, getCellsCount=36 at 1731457757745 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1731457757747 (+2 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731457757747Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731457757774 (+27 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731457757774Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731457757791 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731457757811 (+20 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731457757811Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731457757833 (+22 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731457757862 (+29 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731457757862Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731457757880 (+18 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731457757902 (+22 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731457757902Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@11c4a41d: reopening flushed file at 1731457757921 (+19 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@11fe1d94: reopening flushed file at 1731457757931 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1d77f6c7: reopening flushed file at 1731457757940 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6ea41b32: reopening flushed file at 1731457757951 (+11 ms)Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 218ms, sequenceid=29, compaction requested=false at 1731457757962 (+11 ms)Writing region close event to WAL at 1731457757996 (+34 ms)Closed at 1731457757996 2024-11-13T00:29:17,999 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:29:18,000 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:29:18,000 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:29:18,000 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:29:18,000 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:29:18,010 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34995 is added to blk_1073741830_1006 (size=10311) 2024-11-13T00:29:18,012 INFO [M:0;cb28580903fb:46591 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-13T00:29:18,012 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-13T00:29:18,012 INFO [M:0;cb28580903fb:46591 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:46591 2024-11-13T00:29:18,012 INFO [M:0;cb28580903fb:46591 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-13T00:29:18,012 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42473 is added to blk_1073741830_1006 (size=10311) 2024-11-13T00:29:18,115 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46591-0x1010eba97420000, quorum=127.0.0.1:50605, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-13T00:29:18,116 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46591-0x1010eba97420000, quorum=127.0.0.1:50605, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-13T00:29:18,116 INFO [M:0;cb28580903fb:46591 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-13T00:29:18,129 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@d071559{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-13T00:29:18,130 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1ece158b{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-13T00:29:18,130 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-13T00:29:18,131 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3d87dfcd{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-13T00:29:18,131 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@74fcc9e4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/70c4de87-ce39-31b0-a6cc-d80ffc185cb3/hadoop.log.dir/,STOPPED} 2024-11-13T00:29:18,133 WARN [BP-549882482-172.17.0.2-1731457755480 heartbeating to localhost/127.0.0.1:39361 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-13T00:29:18,133 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
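[Editor's note] The "Ending block pool service" messages below are what a datanode going away looks like; in testLogRollOnDatanodeDeath the test provokes the same condition mid-run by stopping datanodes in the mini DFS cluster. A minimal sketch, assuming HBaseTestingUtil exposes the backing MiniDFSCluster via getDFSCluster() as the older testing utility did:

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.apache.hadoop.hdfs.MiniDFSCluster;

    final class DatanodeDeathSketch {
      // Stops the first datanode; the returned properties can be used to restart it
      // later. This produces the same kind of "Ending block pool service" log lines.
      static MiniDFSCluster.DataNodeProperties stopFirstDataNode(HBaseTestingUtil util) {
        return util.getDFSCluster().stopDataNode(0);
      }
    }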
2024-11-13T00:29:18,133 WARN [BP-549882482-172.17.0.2-1731457755480 heartbeating to localhost/127.0.0.1:39361 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-549882482-172.17.0.2-1731457755480 (Datanode Uuid 6543de5f-8518-406c-be1d-2c4860a53ea5) service to localhost/127.0.0.1:39361 2024-11-13T00:29:18,133 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-13T00:29:18,134 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/70c4de87-ce39-31b0-a6cc-d80ffc185cb3/cluster_e31acad2-208e-f92d-9245-ea2733991be8/data/data3/current/BP-549882482-172.17.0.2-1731457755480 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-13T00:29:18,135 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/70c4de87-ce39-31b0-a6cc-d80ffc185cb3/cluster_e31acad2-208e-f92d-9245-ea2733991be8/data/data4/current/BP-549882482-172.17.0.2-1731457755480 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-13T00:29:18,135 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-13T00:29:18,147 WARN [BP-549882482-172.17.0.2-1731457755480 heartbeating to localhost/127.0.0.1:39361 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-549882482-172.17.0.2-1731457755480 (Datanode Uuid dfbfb49f-5683-4d86-8bee-a0c650df1f89) service to localhost/127.0.0.1:39361 2024-11-13T00:29:18,148 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/70c4de87-ce39-31b0-a6cc-d80ffc185cb3/cluster_e31acad2-208e-f92d-9245-ea2733991be8/data/data1/current/BP-549882482-172.17.0.2-1731457755480 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-13T00:29:18,148 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/70c4de87-ce39-31b0-a6cc-d80ffc185cb3/cluster_e31acad2-208e-f92d-9245-ea2733991be8/data/data2/current/BP-549882482-172.17.0.2-1731457755480 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-13T00:29:18,149 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2a90f125{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-13T00:29:18,150 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@7b8beccb{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-13T00:29:18,150 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-13T00:29:18,150 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@695feae9{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-13T00:29:18,150 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped 
o.e.j.s.ServletContextHandler@232ba44{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/70c4de87-ce39-31b0-a6cc-d80ffc185cb3/hadoop.log.dir/,STOPPED} 2024-11-13T00:29:18,154 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-13T00:29:18,162 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@6c790f33{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-13T00:29:18,163 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@488dcf65{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-13T00:29:18,163 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-13T00:29:18,163 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1c2cf0b6{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-13T00:29:18,163 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@19bf40f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/70c4de87-ce39-31b0-a6cc-d80ffc185cb3/hadoop.log.dir/,STOPPED} 2024-11-13T00:29:18,171 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-13T00:29:18,190 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-13T00:29:18,190 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-13T00:29:18,190 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/70c4de87-ce39-31b0-a6cc-d80ffc185cb3/hadoop.log.dir so I do NOT create it in target/test-data/bed78662-d1c4-0e5c-044c-1c6cedef83ec 2024-11-13T00:29:18,190 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/70c4de87-ce39-31b0-a6cc-d80ffc185cb3/hadoop.tmp.dir so I do NOT create it in target/test-data/bed78662-d1c4-0e5c-044c-1c6cedef83ec 2024-11-13T00:29:18,191 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bed78662-d1c4-0e5c-044c-1c6cedef83ec/cluster_57ab5bb0-53ef-f515-2c82-6c13ee5c6aca, deleteOnExit=true 2024-11-13T00:29:18,191 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-13T00:29:18,191 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bed78662-d1c4-0e5c-044c-1c6cedef83ec/test.cache.data in system properties and HBase conf 2024-11-13T00:29:18,191 INFO [Time-limited test {}] 
hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bed78662-d1c4-0e5c-044c-1c6cedef83ec/hadoop.tmp.dir in system properties and HBase conf 2024-11-13T00:29:18,191 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bed78662-d1c4-0e5c-044c-1c6cedef83ec/hadoop.log.dir in system properties and HBase conf 2024-11-13T00:29:18,191 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bed78662-d1c4-0e5c-044c-1c6cedef83ec/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-13T00:29:18,192 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bed78662-d1c4-0e5c-044c-1c6cedef83ec/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-13T00:29:18,192 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-13T00:29:18,192 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering 2024-11-13T00:29:18,192 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bed78662-d1c4-0e5c-044c-1c6cedef83ec/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-13T00:29:18,192 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bed78662-d1c4-0e5c-044c-1c6cedef83ec/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-13T00:29:18,192 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bed78662-d1c4-0e5c-044c-1c6cedef83ec/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-13T00:29:18,192 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bed78662-d1c4-0e5c-044c-1c6cedef83ec/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-13T00:29:18,192 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bed78662-d1c4-0e5c-044c-1c6cedef83ec/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-13T00:29:18,192 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bed78662-d1c4-0e5c-044c-1c6cedef83ec/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-13T00:29:18,192 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting 
yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bed78662-d1c4-0e5c-044c-1c6cedef83ec/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-13T00:29:18,193 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bed78662-d1c4-0e5c-044c-1c6cedef83ec/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-13T00:29:18,193 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bed78662-d1c4-0e5c-044c-1c6cedef83ec/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-13T00:29:18,193 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bed78662-d1c4-0e5c-044c-1c6cedef83ec/nfs.dump.dir in system properties and HBase conf 2024-11-13T00:29:18,193 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bed78662-d1c4-0e5c-044c-1c6cedef83ec/java.io.tmpdir in system properties and HBase conf 2024-11-13T00:29:18,193 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bed78662-d1c4-0e5c-044c-1c6cedef83ec/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-13T00:29:18,193 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bed78662-d1c4-0e5c-044c-1c6cedef83ec/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-13T00:29:18,193 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bed78662-d1c4-0e5c-044c-1c6cedef83ec/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-13T00:29:18,209 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-13T00:29:18,297 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-13T00:29:18,305 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-13T00:29:18,313 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-13T00:29:18,313 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-13T00:29:18,313 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-13T00:29:18,315 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-13T00:29:18,316 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3789f604{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bed78662-d1c4-0e5c-044c-1c6cedef83ec/hadoop.log.dir/,AVAILABLE} 2024-11-13T00:29:18,317 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7447e09f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-13T00:29:18,447 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@618abfb4{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bed78662-d1c4-0e5c-044c-1c6cedef83ec/java.io.tmpdir/jetty-localhost-34589-hadoop-hdfs-3_4_1-tests_jar-_-any-14088065408130409716/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-13T00:29:18,447 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1585275{HTTP/1.1, (http/1.1)}{localhost:34589} 2024-11-13T00:29:18,448 INFO [Time-limited test {}] server.Server(415): Started @104296ms 2024-11-13T00:29:18,464 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-13T00:29:18,570 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-13T00:29:18,573 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-13T00:29:18,574 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-13T00:29:18,574 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-13T00:29:18,574 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-13T00:29:18,574 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@778fdefb{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bed78662-d1c4-0e5c-044c-1c6cedef83ec/hadoop.log.dir/,AVAILABLE} 2024-11-13T00:29:18,575 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6cb8e75e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-13T00:29:18,696 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@24d45158{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bed78662-d1c4-0e5c-044c-1c6cedef83ec/java.io.tmpdir/jetty-localhost-46541-hadoop-hdfs-3_4_1-tests_jar-_-any-10450934314452821026/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-13T00:29:18,697 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@76afaf39{HTTP/1.1, (http/1.1)}{localhost:46541} 2024-11-13T00:29:18,697 INFO [Time-limited test {}] server.Server(415): Started @104545ms 2024-11-13T00:29:18,698 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-13T00:29:18,749 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-13T00:29:18,754 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-13T00:29:18,757 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-13T00:29:18,757 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-13T00:29:18,757 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-13T00:29:18,758 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@30985369{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bed78662-d1c4-0e5c-044c-1c6cedef83ec/hadoop.log.dir/,AVAILABLE} 2024-11-13T00:29:18,758 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1c9fa828{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-13T00:29:18,790 INFO [regionserver/cb28580903fb:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-13T00:29:18,828 WARN [Thread-655 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bed78662-d1c4-0e5c-044c-1c6cedef83ec/cluster_57ab5bb0-53ef-f515-2c82-6c13ee5c6aca/data/data1/current/BP-155329190-172.17.0.2-1731457758228/current, will proceed with Du for space computation calculation, 2024-11-13T00:29:18,829 WARN [Thread-656 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bed78662-d1c4-0e5c-044c-1c6cedef83ec/cluster_57ab5bb0-53ef-f515-2c82-6c13ee5c6aca/data/data2/current/BP-155329190-172.17.0.2-1731457758228/current, will proceed with Du for space computation calculation, 2024-11-13T00:29:18,860 WARN [Thread-634 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-13T00:29:18,865 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xd41f893921e7269a with lease ID 0x8693d6b9cb094356: Processing first storage report for DS-41660bf4-b5b7-4b75-9656-e64806834d3c from datanode DatanodeRegistration(127.0.0.1:39667, datanodeUuid=7111f6bf-a4eb-45ef-b0e8-709db178ad7d, infoPort=34993, infoSecurePort=0, ipcPort=33819, storageInfo=lv=-57;cid=testClusterID;nsid=1847167993;c=1731457758228) 2024-11-13T00:29:18,865 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xd41f893921e7269a with lease ID 0x8693d6b9cb094356: from storage DS-41660bf4-b5b7-4b75-9656-e64806834d3c node DatanodeRegistration(127.0.0.1:39667, datanodeUuid=7111f6bf-a4eb-45ef-b0e8-709db178ad7d, infoPort=34993, infoSecurePort=0, ipcPort=33819, storageInfo=lv=-57;cid=testClusterID;nsid=1847167993;c=1731457758228), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-13T00:29:18,868 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xd41f893921e7269a with lease ID 0x8693d6b9cb094356: Processing first storage report for DS-f49a6fa1-3103-4d6a-b778-043c581b1bc3 from datanode DatanodeRegistration(127.0.0.1:39667, datanodeUuid=7111f6bf-a4eb-45ef-b0e8-709db178ad7d, infoPort=34993, infoSecurePort=0, ipcPort=33819, storageInfo=lv=-57;cid=testClusterID;nsid=1847167993;c=1731457758228) 2024-11-13T00:29:18,868 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xd41f893921e7269a with lease ID 0x8693d6b9cb094356: from storage DS-f49a6fa1-3103-4d6a-b778-043c581b1bc3 node DatanodeRegistration(127.0.0.1:39667, datanodeUuid=7111f6bf-a4eb-45ef-b0e8-709db178ad7d, infoPort=34993, infoSecurePort=0, ipcPort=33819, storageInfo=lv=-57;cid=testClusterID;nsid=1847167993;c=1731457758228), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-13T00:29:18,918 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3893f69{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bed78662-d1c4-0e5c-044c-1c6cedef83ec/java.io.tmpdir/jetty-localhost-36453-hadoop-hdfs-3_4_1-tests_jar-_-any-7763486619652433642/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-13T00:29:18,919 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@206fa4ee{HTTP/1.1, (http/1.1)}{localhost:36453} 2024-11-13T00:29:18,919 INFO [Time-limited test {}] server.Server(415): Started @104767ms 2024-11-13T00:29:18,920 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
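[Editor's note] The restart sequence above ("Minicluster is down", then "Starting up minicluster with option: StartMiniClusterOption{numMasters=1, numRegionServers=1, numDataNodes=2, numZkServers=1, ...}") is normally driven from test code. Below is a minimal, hedged sketch of how a test typically requests that topology, assuming the HBaseTestingUtil/StartMiniClusterOption API named in the log lines; the builder method names are assumed to mirror the fields in the logged toString and this is not the actual test source.

```java
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.StartMiniClusterOption;

public class MiniClusterSketch {
  public static void main(String[] args) throws Exception {
    // Utility that owns the temporary test directories (hadoop.log.dir, hadoop.tmp.dir, ...)
    // that the log shows being wired into system properties and the HBase conf.
    HBaseTestingUtil util = new HBaseTestingUtil();

    // Topology matching the logged StartMiniClusterOption{numMasters=1, numRegionServers=1,
    // numDataNodes=2, numZkServers=1}; builder method names assumed from those field names.
    StartMiniClusterOption option = StartMiniClusterOption.builder()
        .numMasters(1)
        .numRegionServers(1)
        .numDataNodes(2)
        .numZkServers(1)
        .build();

    util.startMiniCluster(option);   // starts MiniDFS, MiniZooKeeper and the HBase master/RS
    try {
      // ... test body would go here ...
    } finally {
      util.shutdownMiniCluster();    // produces the "Minicluster is down" line seen above
    }
  }
}
```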
2024-11-13T00:29:19,059 WARN [Thread-681 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bed78662-d1c4-0e5c-044c-1c6cedef83ec/cluster_57ab5bb0-53ef-f515-2c82-6c13ee5c6aca/data/data3/current/BP-155329190-172.17.0.2-1731457758228/current, will proceed with Du for space computation calculation, 2024-11-13T00:29:19,060 WARN [Thread-682 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bed78662-d1c4-0e5c-044c-1c6cedef83ec/cluster_57ab5bb0-53ef-f515-2c82-6c13ee5c6aca/data/data4/current/BP-155329190-172.17.0.2-1731457758228/current, will proceed with Du for space computation calculation, 2024-11-13T00:29:19,117 WARN [Thread-670 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-13T00:29:19,121 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x126e771538b72cff with lease ID 0x8693d6b9cb094357: Processing first storage report for DS-0a2d533a-4bec-41d8-9594-717d0345b537 from datanode DatanodeRegistration(127.0.0.1:35189, datanodeUuid=15755521-cc20-4ae7-8d60-99b27f2bc8a1, infoPort=43759, infoSecurePort=0, ipcPort=42013, storageInfo=lv=-57;cid=testClusterID;nsid=1847167993;c=1731457758228) 2024-11-13T00:29:19,121 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x126e771538b72cff with lease ID 0x8693d6b9cb094357: from storage DS-0a2d533a-4bec-41d8-9594-717d0345b537 node DatanodeRegistration(127.0.0.1:35189, datanodeUuid=15755521-cc20-4ae7-8d60-99b27f2bc8a1, infoPort=43759, infoSecurePort=0, ipcPort=42013, storageInfo=lv=-57;cid=testClusterID;nsid=1847167993;c=1731457758228), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-13T00:29:19,121 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x126e771538b72cff with lease ID 0x8693d6b9cb094357: Processing first storage report for DS-c0d089db-e3f0-432f-bc7e-76b3938bf782 from datanode DatanodeRegistration(127.0.0.1:35189, datanodeUuid=15755521-cc20-4ae7-8d60-99b27f2bc8a1, infoPort=43759, infoSecurePort=0, ipcPort=42013, storageInfo=lv=-57;cid=testClusterID;nsid=1847167993;c=1731457758228) 2024-11-13T00:29:19,121 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x126e771538b72cff with lease ID 0x8693d6b9cb094357: from storage DS-c0d089db-e3f0-432f-bc7e-76b3938bf782 node DatanodeRegistration(127.0.0.1:35189, datanodeUuid=15755521-cc20-4ae7-8d60-99b27f2bc8a1, infoPort=43759, infoSecurePort=0, ipcPort=42013, storageInfo=lv=-57;cid=testClusterID;nsid=1847167993;c=1731457758228), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-13T00:29:19,191 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bed78662-d1c4-0e5c-044c-1c6cedef83ec 2024-11-13T00:29:19,195 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bed78662-d1c4-0e5c-044c-1c6cedef83ec/cluster_57ab5bb0-53ef-f515-2c82-6c13ee5c6aca/zookeeper_0, clientPort=62272, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bed78662-d1c4-0e5c-044c-1c6cedef83ec/cluster_57ab5bb0-53ef-f515-2c82-6c13ee5c6aca/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bed78662-d1c4-0e5c-044c-1c6cedef83ec/cluster_57ab5bb0-53ef-f515-2c82-6c13ee5c6aca/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-13T00:29:19,196 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=62272 2024-11-13T00:29:19,196 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T00:29:19,199 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T00:29:19,219 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39667 is added to blk_1073741825_1001 (size=7) 2024-11-13T00:29:19,221 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35189 is added to blk_1073741825_1001 (size=7) 2024-11-13T00:29:19,222 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79 with version=8 2024-11-13T00:29:19,222 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:36657/user/jenkins/test-data/90fad67b-44a6-07b6-9c5c-bf568afb6a9a/hbase-staging 2024-11-13T00:29:19,225 INFO [Time-limited test {}] client.ConnectionUtils(128): master/cb28580903fb:0 server-side Connection retries=45 2024-11-13T00:29:19,225 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-13T00:29:19,225 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-13T00:29:19,225 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-13T00:29:19,225 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-13T00:29:19,225 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-13T00:29:19,226 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-13T00:29:19,226 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-13T00:29:19,226 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:32917 2024-11-13T00:29:19,228 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:32917 connecting to ZooKeeper ensemble=127.0.0.1:62272 2024-11-13T00:29:19,235 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:329170x0, quorum=127.0.0.1:62272, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-13T00:29:19,240 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:32917-0x1010ebaa2690000 connected 2024-11-13T00:29:19,276 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T00:29:19,278 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T00:29:19,283 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:32917-0x1010ebaa2690000, quorum=127.0.0.1:62272, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-13T00:29:19,283 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79, hbase.cluster.distributed=false 2024-11-13T00:29:19,285 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:32917-0x1010ebaa2690000, quorum=127.0.0.1:62272, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-13T00:29:19,291 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=32917 2024-11-13T00:29:19,291 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=32917 2024-11-13T00:29:19,292 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=32917 2024-11-13T00:29:19,296 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=32917 2024-11-13T00:29:19,298 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=32917 2024-11-13T00:29:19,324 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/cb28580903fb:0 server-side Connection retries=45 2024-11-13T00:29:19,324 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-13T00:29:19,324 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-13T00:29:19,324 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-13T00:29:19,324 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-13T00:29:19,324 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-13T00:29:19,324 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-13T00:29:19,325 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-13T00:29:19,326 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:41233 2024-11-13T00:29:19,328 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:41233 connecting to ZooKeeper ensemble=127.0.0.1:62272 2024-11-13T00:29:19,329 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T00:29:19,332 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T00:29:19,339 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:412330x0, quorum=127.0.0.1:62272, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-13T00:29:19,339 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:412330x0, quorum=127.0.0.1:62272, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-13T00:29:19,340 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-13T00:29:19,340 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:41233-0x1010ebaa2690001 connected 2024-11-13T00:29:19,344 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-13T00:29:19,345 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41233-0x1010ebaa2690001, quorum=127.0.0.1:62272, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-13T00:29:19,346 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41233-0x1010ebaa2690001, quorum=127.0.0.1:62272, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-13T00:29:19,348 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=41233 2024-11-13T00:29:19,348 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=41233 2024-11-13T00:29:19,352 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=41233 2024-11-13T00:29:19,354 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=41233 2024-11-13T00:29:19,356 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=41233 2024-11-13T00:29:19,375 
DEBUG [M:0;cb28580903fb:32917 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;cb28580903fb:32917 2024-11-13T00:29:19,375 INFO [master/cb28580903fb:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/cb28580903fb,32917,1731457759225 2024-11-13T00:29:19,377 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41233-0x1010ebaa2690001, quorum=127.0.0.1:62272, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-13T00:29:19,377 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32917-0x1010ebaa2690000, quorum=127.0.0.1:62272, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-13T00:29:19,378 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:32917-0x1010ebaa2690000, quorum=127.0.0.1:62272, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/cb28580903fb,32917,1731457759225 2024-11-13T00:29:19,379 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41233-0x1010ebaa2690001, quorum=127.0.0.1:62272, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-13T00:29:19,379 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41233-0x1010ebaa2690001, quorum=127.0.0.1:62272, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T00:29:19,379 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32917-0x1010ebaa2690000, quorum=127.0.0.1:62272, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T00:29:19,380 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:32917-0x1010ebaa2690000, quorum=127.0.0.1:62272, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-13T00:29:19,381 INFO [master/cb28580903fb:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/cb28580903fb,32917,1731457759225 from backup master directory 2024-11-13T00:29:19,384 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41233-0x1010ebaa2690001, quorum=127.0.0.1:62272, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-13T00:29:19,384 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32917-0x1010ebaa2690000, quorum=127.0.0.1:62272, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/cb28580903fb,32917,1731457759225 2024-11-13T00:29:19,384 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32917-0x1010ebaa2690000, quorum=127.0.0.1:62272, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-13T00:29:19,384 WARN [master/cb28580903fb:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
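[Editor's note] The ZooKeeper traffic above is the master registration dance: a znode is created under /hbase/backup-masters, /hbase/master is watched, and the backup-master znode is deleted once the process wins active mastership. The hedged sketch below inspects those znodes with the plain Apache ZooKeeper client rather than HBase's ZKWatcher/ZKUtil wrappers; the quorum address 127.0.0.1:62272 and the znode paths are taken from the log lines above.

```java
import java.util.List;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

public class ZkInspectSketch {
  public static void main(String[] args) throws Exception {
    // Quorum taken from the log above (MiniZooKeeperCluster on client port 62272).
    ZooKeeper zk = new ZooKeeper("127.0.0.1:62272", 30000, new Watcher() {
      @Override
      public void process(WatchedEvent event) {
        // NodeCreated/NodeDeleted/NodeChildrenChanged events on /hbase/... arrive here,
        // which is what the ZKWatcher(609) lines above are reporting.
        System.out.println("event: " + event);
      }
    });
    try {
      // The active master publishes itself under /hbase/master; candidates park under
      // /hbase/backup-masters until they complete the registration seen above.
      System.out.println("master znode stat: " + zk.exists("/hbase/master", true));
      List<String> backups = zk.getChildren("/hbase/backup-masters", true);
      System.out.println("backup masters: " + backups);
    } finally {
      zk.close();
    }
  }
}
```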
2024-11-13T00:29:19,384 INFO [master/cb28580903fb:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=cb28580903fb,32917,1731457759225 2024-11-13T00:29:19,390 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/hbase.id] with ID: 4d801d72-d321-46f8-ab63-ff446a04ba64 2024-11-13T00:29:19,390 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/.tmp/hbase.id 2024-11-13T00:29:19,397 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39667 is added to blk_1073741826_1002 (size=42) 2024-11-13T00:29:19,398 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35189 is added to blk_1073741826_1002 (size=42) 2024-11-13T00:29:19,399 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/.tmp/hbase.id]:[hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/hbase.id] 2024-11-13T00:29:19,428 INFO [master/cb28580903fb:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T00:29:19,428 INFO [master/cb28580903fb:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-13T00:29:19,430 INFO [master/cb28580903fb:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 
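[Editor's note] The FSUtils lines above describe the cluster ID being written to a .tmp location and then moved into its final hbase.id location. The sketch below shows that generic write-to-temp-then-rename pattern with the Hadoop FileSystem API; it is illustrative only, the paths are hypothetical placeholders, and only the cluster ID string is taken from the log.

```java
import java.nio.charset.StandardCharsets;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ClusterIdWriteSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();  // the real run points fs.defaultFS at hdfs://localhost:34283
    FileSystem fs = FileSystem.get(conf);

    // Hypothetical paths; the logged run uses .../test-data/72a35236-.../.tmp/hbase.id
    // and .../hbase.id as its temporary and final locations.
    Path tmp = new Path("/example/.tmp/hbase.id");
    Path target = new Path("/example/hbase.id");

    // 1. Write the cluster ID (value taken from the log above) to a temporary file first ...
    try (FSDataOutputStream out = fs.create(tmp, true)) {
      out.write("4d801d72-d321-46f8-ab63-ff446a04ba64".getBytes(StandardCharsets.UTF_8));
    }
    // 2. ... then rename it into place, so readers never observe a half-written file.
    if (!fs.rename(tmp, target)) {
      throw new java.io.IOException("rename failed: " + tmp + " -> " + target);
    }
  }
}
```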
2024-11-13T00:29:19,433 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41233-0x1010ebaa2690001, quorum=127.0.0.1:62272, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T00:29:19,433 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32917-0x1010ebaa2690000, quorum=127.0.0.1:62272, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T00:29:19,446 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35189 is added to blk_1073741827_1003 (size=196) 2024-11-13T00:29:19,447 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39667 is added to blk_1073741827_1003 (size=196) 2024-11-13T00:29:19,449 INFO [master/cb28580903fb:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-13T00:29:19,450 INFO [master/cb28580903fb:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-13T00:29:19,451 INFO [master/cb28580903fb:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-13T00:29:19,477 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39667 is added to blk_1073741828_1004 (size=1189) 2024-11-13T00:29:19,478 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35189 is added to blk_1073741828_1004 (size=1189) 2024-11-13T00:29:19,482 INFO [master/cb28580903fb:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/MasterData/data/master/store 2024-11-13T00:29:19,504 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39667 is added to blk_1073741829_1005 (size=34) 2024-11-13T00:29:19,504 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35189 is added to blk_1073741829_1005 (size=34) 2024-11-13T00:29:19,505 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T00:29:19,505 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-13T00:29:19,505 INFO [master/cb28580903fb:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-13T00:29:19,506 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-13T00:29:19,506 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-13T00:29:19,506 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-13T00:29:19,506 INFO [master/cb28580903fb:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
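[Editor's note] The MasterRegion lines above dump the 'master:store' descriptor with four column families (info, proc, rs, state). As a hedged illustration of how such a descriptor would be assembled with the public client API, the sketch below rebuilds the 'info' family with the attributes shown in the log and leaves the other three at defaults; it is not the MasterRegion code itself.

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class MasterStoreDescriptorSketch {
  public static void main(String[] args) {
    // 'info' family as logged: 3 versions, ROW_INDEX_V1 encoding, ROWCOL bloom,
    // in-memory, 8 KB blocks.
    TableDescriptor desc = TableDescriptorBuilder.newBuilder(TableName.valueOf("master:store"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .setBloomFilterType(BloomType.ROWCOL)
            .setInMemory(true)
            .setBlocksize(8 * 1024)
            .build())
        // 'proc', 'rs' and 'state' are left at client defaults here; the log shows them with
        // 1 version, ROW bloom filter and 64 KB blocks.
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("proc"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("rs"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("state"))
        .build();
    System.out.println(desc);
  }
}
```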
2024-11-13T00:29:19,506 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731457759505Disabling compacts and flushes for region at 1731457759505Disabling writes for close at 1731457759506 (+1 ms)Writing region close event to WAL at 1731457759506Closed at 1731457759506 2024-11-13T00:29:19,507 WARN [master/cb28580903fb:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/MasterData/data/master/store/.initializing 2024-11-13T00:29:19,507 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/MasterData/WALs/cb28580903fb,32917,1731457759225 2024-11-13T00:29:19,508 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T00:29:19,514 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T00:29:19,519 INFO [master/cb28580903fb:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=cb28580903fb%2C32917%2C1731457759225, suffix=, logDir=hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/MasterData/WALs/cb28580903fb,32917,1731457759225, archiveDir=hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/MasterData/oldWALs, maxLogs=10 2024-11-13T00:29:19,520 INFO [master/cb28580903fb:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor cb28580903fb%2C32917%2C1731457759225.1731457759520 2024-11-13T00:29:19,538 INFO [master/cb28580903fb:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/MasterData/WALs/cb28580903fb,32917,1731457759225/cb28580903fb%2C32917%2C1731457759225.1731457759520 2024-11-13T00:29:19,549 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34993:34993),(127.0.0.1/127.0.0.1:43759:43759)] 2024-11-13T00:29:19,553 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-13T00:29:19,553 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T00:29:19,553 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-13T00:29:19,553 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-13T00:29:19,557 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-13T00:29:19,559 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-13T00:29:19,559 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T00:29:19,560 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T00:29:19,560 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-13T00:29:19,562 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-13T00:29:19,562 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T00:29:19,562 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T00:29:19,563 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-13T00:29:19,564 INFO 
[StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-13T00:29:19,564 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T00:29:19,565 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T00:29:19,565 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-13T00:29:19,567 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-13T00:29:19,567 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T00:29:19,567 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T00:29:19,567 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-13T00:29:19,568 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-13T00:29:19,569 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 
recovered edits file(s) under hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-13T00:29:19,571 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-13T00:29:19,571 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-13T00:29:19,572 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-13T00:29:19,573 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-13T00:29:19,576 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-13T00:29:19,576 INFO [master/cb28580903fb:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=841385, jitterRate=0.0698775202035904}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-13T00:29:19,577 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731457759553Initializing all the Stores at 1731457759556 (+3 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731457759556Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731457759556Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731457759557 (+1 ms)Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731457759557Cleaning up temporary data from old regions at 1731457759571 (+14 ms)Region opened successfully at 1731457759577 (+6 ms) 2024-11-13T00:29:19,577 INFO 
[master/cb28580903fb:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-13T00:29:19,582 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1e2ebfe5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=cb28580903fb/172.17.0.2:0 2024-11-13T00:29:19,583 INFO [master/cb28580903fb:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-13T00:29:19,583 INFO [master/cb28580903fb:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-13T00:29:19,583 INFO [master/cb28580903fb:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-13T00:29:19,583 INFO [master/cb28580903fb:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-13T00:29:19,584 INFO [master/cb28580903fb:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-13T00:29:19,584 INFO [master/cb28580903fb:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-13T00:29:19,584 INFO [master/cb28580903fb:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-13T00:29:19,587 INFO [master/cb28580903fb:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 
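Annotation — the ProcedureExecutor entry above reports "Starting 5 core workers (bigger of cpus/4 or 16)". A minimal, hedged sketch of how that worker count is typically resolved from configuration; the key name hbase.master.procedure.threads and the default formula are assumptions based on common HBase configuration, not values read from this log (here only 5 workers start, presumably because the test configuration overrides the default):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class ProcedureWorkerSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        int cpus = Runtime.getRuntime().availableProcessors();
        // Assumed key; default mirrors the "bigger of cpus/4 or 16" wording above.
        int coreWorkers = conf.getInt("hbase.master.procedure.threads",
            Math.max(cpus / 4, 16));
        System.out.println("procedure core workers = " + coreWorkers);
      }
    }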
2024-11-13T00:29:19,588 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:32917-0x1010ebaa2690000, quorum=127.0.0.1:62272, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-13T00:29:19,589 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-13T00:29:19,589 INFO [master/cb28580903fb:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-13T00:29:19,590 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:32917-0x1010ebaa2690000, quorum=127.0.0.1:62272, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-13T00:29:19,593 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-13T00:29:19,593 INFO [master/cb28580903fb:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-13T00:29:19,599 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:32917-0x1010ebaa2690000, quorum=127.0.0.1:62272, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-13T00:29:19,600 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-13T00:29:19,601 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:32917-0x1010ebaa2690000, quorum=127.0.0.1:62272, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-13T00:29:19,604 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-13T00:29:19,605 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:32917-0x1010ebaa2690000, quorum=127.0.0.1:62272, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-13T00:29:19,606 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-13T00:29:19,608 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32917-0x1010ebaa2690000, quorum=127.0.0.1:62272, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-13T00:29:19,608 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41233-0x1010ebaa2690001, quorum=127.0.0.1:62272, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-13T00:29:19,608 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32917-0x1010ebaa2690000, quorum=127.0.0.1:62272, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T00:29:19,608 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41233-0x1010ebaa2690001, quorum=127.0.0.1:62272, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, 
state=SyncConnected, path=/hbase 2024-11-13T00:29:19,609 INFO [master/cb28580903fb:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=cb28580903fb,32917,1731457759225, sessionid=0x1010ebaa2690000, setting cluster-up flag (Was=false) 2024-11-13T00:29:19,614 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41233-0x1010ebaa2690001, quorum=127.0.0.1:62272, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T00:29:19,614 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32917-0x1010ebaa2690000, quorum=127.0.0.1:62272, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T00:29:19,619 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-13T00:29:19,620 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=cb28580903fb,32917,1731457759225 2024-11-13T00:29:19,625 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41233-0x1010ebaa2690001, quorum=127.0.0.1:62272, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T00:29:19,625 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32917-0x1010ebaa2690000, quorum=127.0.0.1:62272, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T00:29:19,633 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-13T00:29:19,635 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=cb28580903fb,32917,1731457759225 2024-11-13T00:29:19,636 INFO [master/cb28580903fb:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-13T00:29:19,639 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-13T00:29:19,639 INFO [master/cb28580903fb:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-13T00:29:19,639 INFO [master/cb28580903fb:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
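Annotation — the StochasticLoadBalancer entry above lists maxSteps=1000000, stepsPerRegion=800 and maxRunningTime=30000. A hedged sketch of reading the settings that usually drive those values; the key names are assumptions, not confirmed by this log:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class BalancerConfigSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Assumed key names; defaults chosen to match the values logged above.
        long maxSteps = conf.getLong("hbase.master.balancer.stochastic.maxSteps", 1_000_000L);
        int stepsPerRegion = conf.getInt("hbase.master.balancer.stochastic.stepsPerRegion", 800);
        long maxRunningTime = conf.getLong("hbase.master.balancer.stochastic.maxRunningTime", 30_000L);
        System.out.printf("maxSteps=%d stepsPerRegion=%d maxRunningTime=%dms%n",
            maxSteps, stepsPerRegion, maxRunningTime);
      }
    }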
2024-11-13T00:29:19,639 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: cb28580903fb,32917,1731457759225 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-13T00:29:19,640 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/cb28580903fb:0, corePoolSize=5, maxPoolSize=5 2024-11-13T00:29:19,641 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/cb28580903fb:0, corePoolSize=5, maxPoolSize=5 2024-11-13T00:29:19,641 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/cb28580903fb:0, corePoolSize=5, maxPoolSize=5 2024-11-13T00:29:19,641 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/cb28580903fb:0, corePoolSize=5, maxPoolSize=5 2024-11-13T00:29:19,641 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/cb28580903fb:0, corePoolSize=10, maxPoolSize=10 2024-11-13T00:29:19,641 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/cb28580903fb:0, corePoolSize=1, maxPoolSize=1 2024-11-13T00:29:19,641 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/cb28580903fb:0, corePoolSize=2, maxPoolSize=2 2024-11-13T00:29:19,641 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/cb28580903fb:0, corePoolSize=1, maxPoolSize=1 2024-11-13T00:29:19,643 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-13T00:29:19,643 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-13T00:29:19,644 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T00:29:19,644 INFO [master/cb28580903fb:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731457789644 2024-11-13T00:29:19,644 INFO [master/cb28580903fb:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-13T00:29:19,644 INFO [master/cb28580903fb:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-13T00:29:19,644 INFO [master/cb28580903fb:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-13T00:29:19,644 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 
'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-13T00:29:19,644 INFO [master/cb28580903fb:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-13T00:29:19,644 INFO [master/cb28580903fb:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-13T00:29:19,645 INFO [master/cb28580903fb:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-13T00:29:19,648 INFO [master/cb28580903fb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
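Annotation — the FSTableDescriptors entry above prints the hbase:meta descriptor with its column families. A minimal sketch of how a family with the same settings as the logged 'info' family (VERSIONS=3, IN_MEMORY, BLOCKSIZE=8192, BLOOMFILTER=ROWCOL, ROW_INDEX_V1 encoding) can be expressed with the public client API; the table name "example_meta_like" is hypothetical, and this is not the internal meta-bootstrap code path:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MetaLikeDescriptorSketch {
      public static void main(String[] args) {
        // Family settings copied from the 'info' family in the descriptor above.
        ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)
            .setInMemory(true)
            .setBlocksize(8192)
            .setBloomFilterType(BloomType.ROWCOL)
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .build();
        // Hypothetical table name, used only to make the sketch self-contained.
        TableDescriptor td = TableDescriptorBuilder.newBuilder(TableName.valueOf("example_meta_like"))
            .setColumnFamily(info)
            .build();
        System.out.println(td);
      }
    }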
2024-11-13T00:29:19,648 INFO [master/cb28580903fb:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-13T00:29:19,648 INFO [master/cb28580903fb:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-13T00:29:19,649 INFO [master/cb28580903fb:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-13T00:29:19,649 INFO [master/cb28580903fb:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-13T00:29:19,649 INFO [master/cb28580903fb:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-13T00:29:19,649 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/cb28580903fb:0:becomeActiveMaster-HFileCleaner.large.0-1731457759649,5,FailOnTimeoutGroup] 2024-11-13T00:29:19,652 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/cb28580903fb:0:becomeActiveMaster-HFileCleaner.small.0-1731457759649,5,FailOnTimeoutGroup] 2024-11-13T00:29:19,652 INFO [master/cb28580903fb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-13T00:29:19,652 INFO [master/cb28580903fb:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-13T00:29:19,652 INFO [master/cb28580903fb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-13T00:29:19,652 INFO [master/cb28580903fb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
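Annotation — the entries above initialize the TimeToLiveLogCleaner and TimeToLiveHFileCleaner plugins. A short, hedged sketch of reading the TTLs those cleaners typically honor; both key names and defaults are assumptions, not taken from this log:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CleanerTtlSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Assumed keys for the TimeToLive*Cleaner plugins listed above.
        long walTtlMs = conf.getLong("hbase.master.logcleaner.ttl", 600_000L);
        long hfileTtlMs = conf.getLong("hbase.master.hfilecleaner.ttl", 300_000L);
        System.out.println("oldWALs ttl=" + walTtlMs + "ms, archived hfile ttl=" + hfileTtlMs + "ms");
      }
    }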
2024-11-13T00:29:19,656 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39667 is added to blk_1073741831_1007 (size=1321) 2024-11-13T00:29:19,656 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35189 is added to blk_1073741831_1007 (size=1321) 2024-11-13T00:29:19,657 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-13T00:29:19,657 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79 2024-11-13T00:29:19,658 INFO [RS:0;cb28580903fb:41233 {}] regionserver.HRegionServer(746): ClusterId : 4d801d72-d321-46f8-ab63-ff446a04ba64 2024-11-13T00:29:19,658 DEBUG [RS:0;cb28580903fb:41233 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-13T00:29:19,661 DEBUG [RS:0;cb28580903fb:41233 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-13T00:29:19,661 DEBUG [RS:0;cb28580903fb:41233 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-13T00:29:19,671 DEBUG [RS:0;cb28580903fb:41233 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-13T00:29:19,672 DEBUG [RS:0;cb28580903fb:41233 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@9e33cce, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=cb28580903fb/172.17.0.2:0 2024-11-13T00:29:19,672 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35189 is added to blk_1073741832_1008 (size=32) 2024-11-13T00:29:19,672 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39667 is added to blk_1073741832_1008 (size=32) 2024-11-13T00:29:19,684 DEBUG [RS:0;cb28580903fb:41233 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;cb28580903fb:41233 2024-11-13T00:29:19,684 INFO [RS:0;cb28580903fb:41233 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-13T00:29:19,684 INFO [RS:0;cb28580903fb:41233 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-13T00:29:19,684 DEBUG [RS:0;cb28580903fb:41233 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-13T00:29:19,685 INFO [RS:0;cb28580903fb:41233 {}] regionserver.HRegionServer(2659): reportForDuty to master=cb28580903fb,32917,1731457759225 with port=41233, startcode=1731457759323 2024-11-13T00:29:19,685 DEBUG [RS:0;cb28580903fb:41233 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-13T00:29:19,688 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34869, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=RegionServerStatusService 2024-11-13T00:29:19,688 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=32917 {}] master.ServerManager(363): Checking decommissioned status of RegionServer cb28580903fb,41233,1731457759323 2024-11-13T00:29:19,688 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=32917 {}] master.ServerManager(517): Registering regionserver=cb28580903fb,41233,1731457759323 2024-11-13T00:29:19,690 DEBUG [RS:0;cb28580903fb:41233 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79 2024-11-13T00:29:19,690 DEBUG [RS:0;cb28580903fb:41233 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:34283 2024-11-13T00:29:19,690 DEBUG [RS:0;cb28580903fb:41233 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-13T00:29:19,692 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32917-0x1010ebaa2690000, quorum=127.0.0.1:62272, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-13T00:29:19,693 DEBUG [RS:0;cb28580903fb:41233 {}] zookeeper.ZKUtil(111): regionserver:41233-0x1010ebaa2690001, quorum=127.0.0.1:62272, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/cb28580903fb,41233,1731457759323 2024-11-13T00:29:19,693 WARN [RS:0;cb28580903fb:41233 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
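Annotation — the ZKUtil entry above sets a watcher on the regionserver's ephemeral znode /hbase/rs/cb28580903fb,41233,1731457759323. A hedged sketch of the same check done with the plain ZooKeeper client, using the quorum string and znode path from the log; this mirrors the idea of ZKUtil's watch, not its actual implementation:

    import java.util.concurrent.CountDownLatch;
    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooKeeper;
    import org.apache.zookeeper.data.Stat;

    public class RsZnodeWatchSketch {
      public static void main(String[] args) throws Exception {
        CountDownLatch connected = new CountDownLatch(1);
        // Quorum string copied from the log entries above.
        ZooKeeper zk = new ZooKeeper("127.0.0.1:62272", 30_000, event -> {
          if (event.getState() == Watcher.Event.KeeperState.SyncConnected) {
            connected.countDown();
          }
        });
        connected.await();
        // Passing true leaves a watch so a later NodeDeleted event would be delivered.
        Stat stat = zk.exists("/hbase/rs/cb28580903fb,41233,1731457759323", true);
        System.out.println(stat == null ? "znode missing" : "znode exists, version=" + stat.getVersion());
        zk.close();
      }
    }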
2024-11-13T00:29:19,693 INFO [RS:0;cb28580903fb:41233 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-13T00:29:19,693 DEBUG [RS:0;cb28580903fb:41233 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,41233,1731457759323 2024-11-13T00:29:19,693 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [cb28580903fb,41233,1731457759323] 2024-11-13T00:29:19,699 INFO [RS:0;cb28580903fb:41233 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-13T00:29:19,701 INFO [RS:0;cb28580903fb:41233 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-13T00:29:19,702 INFO [RS:0;cb28580903fb:41233 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-13T00:29:19,702 INFO [RS:0;cb28580903fb:41233 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-13T00:29:19,704 INFO [RS:0;cb28580903fb:41233 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-13T00:29:19,705 INFO [RS:0;cb28580903fb:41233 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-13T00:29:19,705 INFO [RS:0;cb28580903fb:41233 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-13T00:29:19,705 DEBUG [RS:0;cb28580903fb:41233 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/cb28580903fb:0, corePoolSize=1, maxPoolSize=1 2024-11-13T00:29:19,705 DEBUG [RS:0;cb28580903fb:41233 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/cb28580903fb:0, corePoolSize=1, maxPoolSize=1 2024-11-13T00:29:19,705 DEBUG [RS:0;cb28580903fb:41233 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/cb28580903fb:0, corePoolSize=1, maxPoolSize=1 2024-11-13T00:29:19,705 DEBUG [RS:0;cb28580903fb:41233 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/cb28580903fb:0, corePoolSize=1, maxPoolSize=1 2024-11-13T00:29:19,706 DEBUG [RS:0;cb28580903fb:41233 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/cb28580903fb:0, corePoolSize=1, maxPoolSize=1 2024-11-13T00:29:19,706 DEBUG [RS:0;cb28580903fb:41233 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/cb28580903fb:0, corePoolSize=2, maxPoolSize=2 2024-11-13T00:29:19,706 DEBUG [RS:0;cb28580903fb:41233 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/cb28580903fb:0, corePoolSize=1, maxPoolSize=1 2024-11-13T00:29:19,706 DEBUG [RS:0;cb28580903fb:41233 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/cb28580903fb:0, corePoolSize=1, maxPoolSize=1 2024-11-13T00:29:19,706 DEBUG [RS:0;cb28580903fb:41233 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/cb28580903fb:0, corePoolSize=1, 
maxPoolSize=1 2024-11-13T00:29:19,706 DEBUG [RS:0;cb28580903fb:41233 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/cb28580903fb:0, corePoolSize=1, maxPoolSize=1 2024-11-13T00:29:19,706 DEBUG [RS:0;cb28580903fb:41233 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/cb28580903fb:0, corePoolSize=1, maxPoolSize=1 2024-11-13T00:29:19,706 DEBUG [RS:0;cb28580903fb:41233 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/cb28580903fb:0, corePoolSize=1, maxPoolSize=1 2024-11-13T00:29:19,706 DEBUG [RS:0;cb28580903fb:41233 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/cb28580903fb:0, corePoolSize=3, maxPoolSize=3 2024-11-13T00:29:19,706 DEBUG [RS:0;cb28580903fb:41233 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/cb28580903fb:0, corePoolSize=3, maxPoolSize=3 2024-11-13T00:29:19,706 INFO [RS:0;cb28580903fb:41233 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-13T00:29:19,706 INFO [RS:0;cb28580903fb:41233 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-13T00:29:19,707 INFO [RS:0;cb28580903fb:41233 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-13T00:29:19,707 INFO [RS:0;cb28580903fb:41233 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-13T00:29:19,707 INFO [RS:0;cb28580903fb:41233 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-13T00:29:19,707 INFO [RS:0;cb28580903fb:41233 {}] hbase.ChoreService(168): Chore ScheduledChore name=cb28580903fb,41233,1731457759323-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-13T00:29:19,724 INFO [RS:0;cb28580903fb:41233 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-13T00:29:19,724 INFO [RS:0;cb28580903fb:41233 {}] hbase.ChoreService(168): Chore ScheduledChore name=cb28580903fb,41233,1731457759323-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-13T00:29:19,724 INFO [RS:0;cb28580903fb:41233 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-13T00:29:19,724 INFO [RS:0;cb28580903fb:41233 {}] regionserver.Replication(171): cb28580903fb,41233,1731457759323 started 2024-11-13T00:29:19,743 INFO [RS:0;cb28580903fb:41233 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
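Annotation — the ChoreService entries above schedule periodic chores such as CompactionChecker and MemstoreFlusherChore with period=1000 ms. A tiny JDK-only sketch of the same fixed-rate pattern; it is an analogy for how such chores run, not HBase's ChoreService itself:

    import java.util.concurrent.Executors;
    import java.util.concurrent.ScheduledExecutorService;
    import java.util.concurrent.TimeUnit;

    public class ChorePeriodSketch {
      public static void main(String[] args) {
        ScheduledExecutorService pool = Executors.newScheduledThreadPool(1);
        // 1000 ms mirrors the CompactionChecker / MemstoreFlusherChore periods above.
        pool.scheduleAtFixedRate(() -> System.out.println("chore tick"),
            1_000, 1_000, TimeUnit.MILLISECONDS);
        // pool.shutdown() would stop the chore; omitted so the tick keeps running.
      }
    }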
2024-11-13T00:29:19,743 INFO [RS:0;cb28580903fb:41233 {}] regionserver.HRegionServer(1482): Serving as cb28580903fb,41233,1731457759323, RpcServer on cb28580903fb/172.17.0.2:41233, sessionid=0x1010ebaa2690001 2024-11-13T00:29:19,743 DEBUG [RS:0;cb28580903fb:41233 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-13T00:29:19,743 DEBUG [RS:0;cb28580903fb:41233 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager cb28580903fb,41233,1731457759323 2024-11-13T00:29:19,743 DEBUG [RS:0;cb28580903fb:41233 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'cb28580903fb,41233,1731457759323' 2024-11-13T00:29:19,743 DEBUG [RS:0;cb28580903fb:41233 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-13T00:29:19,744 DEBUG [RS:0;cb28580903fb:41233 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-13T00:29:19,744 DEBUG [RS:0;cb28580903fb:41233 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-13T00:29:19,744 DEBUG [RS:0;cb28580903fb:41233 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-13T00:29:19,745 DEBUG [RS:0;cb28580903fb:41233 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager cb28580903fb,41233,1731457759323 2024-11-13T00:29:19,745 DEBUG [RS:0;cb28580903fb:41233 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'cb28580903fb,41233,1731457759323' 2024-11-13T00:29:19,745 DEBUG [RS:0;cb28580903fb:41233 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-13T00:29:19,745 DEBUG [RS:0;cb28580903fb:41233 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-13T00:29:19,746 DEBUG [RS:0;cb28580903fb:41233 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-13T00:29:19,746 INFO [RS:0;cb28580903fb:41233 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-13T00:29:19,746 INFO [RS:0;cb28580903fb:41233 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 
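Annotation — the quota manager entries above report "Quota support disabled". A hedged two-line sketch of toggling that support on a Configuration before cluster start; the key name hbase.quota.enabled is an assumption, not read from this log:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class QuotaToggleSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        conf.setBoolean("hbase.quota.enabled", true); // assumed key; default path in the log is disabled
        System.out.println("quota support enabled = " + conf.getBoolean("hbase.quota.enabled", false));
      }
    }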
2024-11-13T00:29:19,849 INFO [RS:0;cb28580903fb:41233 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=cb28580903fb%2C41233%2C1731457759323, suffix=, logDir=hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,41233,1731457759323, archiveDir=hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/oldWALs, maxLogs=32 2024-11-13T00:29:19,850 INFO [RS:0;cb28580903fb:41233 {}] monitor.StreamSlowMonitor(122): New stream slow monitor cb28580903fb%2C41233%2C1731457759323.1731457759850 2024-11-13T00:29:19,862 INFO [RS:0;cb28580903fb:41233 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,41233,1731457759323/cb28580903fb%2C41233%2C1731457759323.1731457759850 2024-11-13T00:29:19,869 DEBUG [RS:0;cb28580903fb:41233 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43759:43759),(127.0.0.1/127.0.0.1:34993:34993)] 2024-11-13T00:29:20,037 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-13T00:29:20,040 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T00:29:20,065 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T00:29:20,068 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T00:29:20,069 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T00:29:20,073 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T00:29:20,076 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-13T00:29:20,078 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-13T00:29:20,078 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-13T00:29:20,078 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-13T00:29:20,078 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling 2024-11-13T00:29:20,078 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T00:29:20,079 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T00:29:20,079 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-13T00:29:20,081 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-13T00:29:20,081 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T00:29:20,081 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T00:29:20,081 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-13T00:29:20,083 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-13T00:29:20,083 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T00:29:20,084 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T00:29:20,084 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-13T00:29:20,085 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-13T00:29:20,085 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T00:29:20,086 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): 
Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T00:29:20,086 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-13T00:29:20,087 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/data/hbase/meta/1588230740 2024-11-13T00:29:20,088 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/data/hbase/meta/1588230740 2024-11-13T00:29:20,090 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-13T00:29:20,090 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-13T00:29:20,091 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-13T00:29:20,093 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-13T00:29:20,097 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-13T00:29:20,097 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=859849, jitterRate=0.09335561096668243}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-13T00:29:20,098 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731457760074Initializing all the Stores at 1731457760075 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731457760075Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731457760075Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731457760075Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 
'true', BLOCKSIZE => '8192 B (8KB)'} at 1731457760075Cleaning up temporary data from old regions at 1731457760090 (+15 ms)Region opened successfully at 1731457760098 (+8 ms) 2024-11-13T00:29:20,099 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-13T00:29:20,099 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-13T00:29:20,099 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-13T00:29:20,099 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-13T00:29:20,099 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-13T00:29:20,100 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-13T00:29:20,102 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731457760098Disabling compacts and flushes for region at 1731457760098Disabling writes for close at 1731457760099 (+1 ms)Writing region close event to WAL at 1731457760100 (+1 ms)Closed at 1731457760100 2024-11-13T00:29:20,103 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-13T00:29:20,104 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-13T00:29:20,104 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-13T00:29:20,106 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-13T00:29:20,107 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-13T00:29:20,258 DEBUG [cb28580903fb:32917 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-13T00:29:20,258 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=cb28580903fb,41233,1731457759323 2024-11-13T00:29:20,260 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as cb28580903fb,41233,1731457759323, state=OPENING 2024-11-13T00:29:20,263 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-13T00:29:20,265 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41233-0x1010ebaa2690001, quorum=127.0.0.1:62272, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T00:29:20,265 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32917-0x1010ebaa2690000, quorum=127.0.0.1:62272, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, 
path=/hbase 2024-11-13T00:29:20,266 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-13T00:29:20,266 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-13T00:29:20,266 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=cb28580903fb,41233,1731457759323}] 2024-11-13T00:29:20,266 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-13T00:29:20,419 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-13T00:29:20,422 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42325, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-13T00:29:20,427 INFO [RS_OPEN_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-13T00:29:20,428 INFO [RS_OPEN_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-13T00:29:20,430 INFO [RS_OPEN_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=cb28580903fb%2C41233%2C1731457759323.meta, suffix=.meta, logDir=hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,41233,1731457759323, archiveDir=hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/oldWALs, maxLogs=32 2024-11-13T00:29:20,431 INFO [RS_OPEN_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor cb28580903fb%2C41233%2C1731457759323.meta.1731457760430.meta 2024-11-13T00:29:20,437 INFO [RS_OPEN_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,41233,1731457759323/cb28580903fb%2C41233%2C1731457759323.meta.1731457760430.meta 2024-11-13T00:29:20,441 DEBUG [RS_OPEN_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43759:43759),(127.0.0.1/127.0.0.1:34993:34993)] 2024-11-13T00:29:20,444 DEBUG [RS_OPEN_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-13T00:29:20,445 DEBUG [RS_OPEN_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-13T00:29:20,445 DEBUG [RS_OPEN_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): 
Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService
2024-11-13T00:29:20,445 INFO [RS_OPEN_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully.
2024-11-13T00:29:20,445 DEBUG [RS_OPEN_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740
2024-11-13T00:29:20,445 DEBUG [RS_OPEN_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-11-13T00:29:20,445 DEBUG [RS_OPEN_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740
2024-11-13T00:29:20,445 DEBUG [RS_OPEN_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740
2024-11-13T00:29:20,448 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740
2024-11-13T00:29:20,450 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info
2024-11-13T00:29:20,450 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-13T00:29:20,450 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-11-13T00:29:20,451 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740
2024-11-13T00:29:20,452 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns
2024-11-13T00:29:20,452 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-13T00:29:20,452 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-11-13T00:29:20,452 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740
2024-11-13T00:29:20,453 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier
2024-11-13T00:29:20,454 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-13T00:29:20,454 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-11-13T00:29:20,454 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740
2024-11-13T00:29:20,455 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table
2024-11-13T00:29:20,455 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-13T00:29:20,456 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-11-13T00:29:20,456 DEBUG [RS_OPEN_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740
2024-11-13T00:29:20,457 DEBUG [RS_OPEN_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/data/hbase/meta/1588230740
2024-11-13T00:29:20,458 DEBUG [RS_OPEN_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/data/hbase/meta/1588230740
2024-11-13T00:29:20,460 DEBUG [RS_OPEN_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740
2024-11-13T00:29:20,460 DEBUG [RS_OPEN_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740
2024-11-13T00:29:20,461 DEBUG [RS_OPEN_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead.
2024-11-13T00:29:20,462 DEBUG [RS_OPEN_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740
2024-11-13T00:29:20,464 INFO [RS_OPEN_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=844853, jitterRate=0.07428695261478424}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216}
2024-11-13T00:29:20,464 DEBUG [RS_OPEN_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740
2024-11-13T00:29:20,465 DEBUG [RS_OPEN_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731457760446Writing region info on filesystem at 1731457760446Initializing all the Stores at 1731457760447 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731457760447Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731457760448 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731457760448Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731457760448Cleaning up temporary data from old regions at 1731457760460 (+12 ms)Running coprocessor post-open hooks at 1731457760464 (+4 ms)Region opened successfully at 1731457760464
2024-11-13T00:29:20,466 INFO [RS_OPEN_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731457760419
2024-11-13T00:29:20,470 DEBUG [RS_OPEN_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740
2024-11-13T00:29:20,470 INFO [RS_OPEN_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740
2024-11-13T00:29:20,471 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=cb28580903fb,41233,1731457759323
2024-11-13T00:29:20,472 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as cb28580903fb,41233,1731457759323, state=OPEN
2024-11-13T00:29:20,480 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41233-0x1010ebaa2690001, quorum=127.0.0.1:62272, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server
2024-11-13T00:29:20,480 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32917-0x1010ebaa2690000, quorum=127.0.0.1:62272, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server
2024-11-13T00:29:20,480 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=cb28580903fb,41233,1731457759323
2024-11-13T00:29:20,480 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED
2024-11-13T00:29:20,480 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED
2024-11-13T00:29:20,484 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2
2024-11-13T00:29:20,484 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=cb28580903fb,41233,1731457759323 in 214 msec
2024-11-13T00:29:20,487 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1
2024-11-13T00:29:20,488 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 380 msec
2024-11-13T00:29:20,489 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta
2024-11-13T00:29:20,489 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces
2024-11-13T00:29:20,491 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry
2024-11-13T00:29:20,491 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=cb28580903fb,41233,1731457759323, seqNum=-1]
2024-11-13T00:29:20,491 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false
2024-11-13T00:29:20,493 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58975, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService
2024-11-13T00:29:20,499 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 860 msec
2024-11-13T00:29:20,499 INFO [master/cb28580903fb:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731457760499, completionTime=-1
2024-11-13T00:29:20,500 INFO [master/cb28580903fb:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running
2024-11-13T00:29:20,500 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster...
2024-11-13T00:29:20,502 INFO [master/cb28580903fb:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1
2024-11-13T00:29:20,502 INFO [master/cb28580903fb:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731457820502
2024-11-13T00:29:20,502 INFO [master/cb28580903fb:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731457880502
2024-11-13T00:29:20,502 INFO [master/cb28580903fb:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 2 msec
2024-11-13T00:29:20,502 INFO [master/cb28580903fb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=cb28580903fb,32917,1731457759225-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled.
2024-11-13T00:29:20,502 INFO [master/cb28580903fb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=cb28580903fb,32917,1731457759225-BalancerChore, period=300000, unit=MILLISECONDS is enabled.
2024-11-13T00:29:20,502 INFO [master/cb28580903fb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=cb28580903fb,32917,1731457759225-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled.
2024-11-13T00:29:20,502 INFO [master/cb28580903fb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-cb28580903fb:32917, period=300000, unit=MILLISECONDS is enabled.
2024-11-13T00:29:20,502 INFO [master/cb28580903fb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled.
2024-11-13T00:29:20,503 INFO [master/cb28580903fb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled.
2024-11-13T00:29:20,504 DEBUG [master/cb28580903fb:0.Chore.1 {}] janitor.CatalogJanitor(180):
2024-11-13T00:29:20,507 INFO [master/cb28580903fb:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.123sec
2024-11-13T00:29:20,507 INFO [master/cb28580903fb:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled
2024-11-13T00:29:20,507 INFO [master/cb28580903fb:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting.
2024-11-13T00:29:20,507 INFO [master/cb28580903fb:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting.
2024-11-13T00:29:20,507 INFO [master/cb28580903fb:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting.
2024-11-13T00:29:20,507 INFO [master/cb28580903fb:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding
2024-11-13T00:29:20,507 INFO [master/cb28580903fb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=cb28580903fb,32917,1731457759225-MobFileCleanerChore, period=86400, unit=SECONDS is enabled.
2024-11-13T00:29:20,507 INFO [master/cb28580903fb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=cb28580903fb,32917,1731457759225-MobFileCompactionChore, period=604800, unit=SECONDS is enabled.
2024-11-13T00:29:20,510 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds
2024-11-13T00:29:20,510 INFO [master/cb28580903fb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled.
2024-11-13T00:29:20,510 INFO [master/cb28580903fb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=cb28580903fb,32917,1731457759225-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled.
2024-11-13T00:29:20,559 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@69bb1e39, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-11-13T00:29:20,559 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request cb28580903fb,32917,-1 for getting cluster id
2024-11-13T00:29:20,559 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false
2024-11-13T00:29:20,562 DEBUG [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '4d801d72-d321-46f8-ab63-ff446a04ba64'
2024-11-13T00:29:20,562 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse
2024-11-13T00:29:20,562 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "4d801d72-d321-46f8-ab63-ff446a04ba64"
2024-11-13T00:29:20,563 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7d65fa4a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-11-13T00:29:20,563 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [cb28580903fb,32917,-1]
2024-11-13T00:29:20,563 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false
2024-11-13T00:29:20,564 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-13T00:29:20,565 INFO [HMaster-EventLoopGroup-7-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47470, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService
2024-11-13T00:29:20,566 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@43ab55cb, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-11-13T00:29:20,566 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry
2024-11-13T00:29:20,567 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=cb28580903fb,41233,1731457759323, seqNum=-1]
2024-11-13T00:29:20,568 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false
2024-11-13T00:29:20,569 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41902, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService
2024-11-13T00:29:20,571 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=cb28580903fb,32917,1731457759225
2024-11-13T00:29:20,571 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-13T00:29:20,574 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false
2024-11-13T00:29:20,594 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/cb28580903fb:0 server-side Connection retries=45
2024-11-13T00:29:20,594 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-11-13T00:29:20,594 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3
2024-11-13T00:29:20,594 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0
2024-11-13T00:29:20,594 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-11-13T00:29:20,594 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1
2024-11-13T00:29:20,594 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService
2024-11-13T00:29:20,594 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation
2024-11-13T00:29:20,595 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:35015
2024-11-13T00:29:20,597 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:35015 connecting to ZooKeeper ensemble=127.0.0.1:62272
2024-11-13T00:29:20,598 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-13T00:29:20,600 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-13T00:29:20,606 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:350150x0, quorum=127.0.0.1:62272, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null
2024-11-13T00:29:20,606 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:35015-0x1010ebaa2690002 connected
2024-11-13T00:29:20,606 DEBUG [pool-381-thread-1 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: INIT
2024-11-13T00:29:20,606 DEBUG [Time-limited test {}] zookeeper.ZKUtil(111): regionserver:35015-0x1010ebaa2690002, quorum=127.0.0.1:62272, baseZNode=/hbase Set watcher on existing znode=/hbase/running
2024-11-13T00:29:20,607 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB
2024-11-13T00:29:20,613 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5
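
The client-side activity logged above (fetching the cluster id through the connection registry, then resolving the hbase:meta location) is what the HBase client performs internally whenever a connection is opened against the mini-cluster. The following is only an illustrative sketch of obtaining such a connection with the public client API; it is not the test's own code, and the quorum/port values simply mirror the 127.0.0.1:62272 ensemble reported in the log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Table;

public class ConnectSketch {
  public static void main(String[] args) throws Exception {
    // Assumed settings for illustration; a real test reuses the Configuration
    // handed out by the mini-cluster utility instead of building its own.
    Configuration conf = HBaseConfiguration.create();
    conf.set("hbase.zookeeper.quorum", "127.0.0.1");
    conf.set("hbase.zookeeper.property.clientPort", "62272");
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Table meta = connection.getTable(TableName.META_TABLE_NAME)) {
      // At this point the registry lookup and meta location fetch seen in the
      // log have already happened inside the client.
      System.out.println("Connected; meta table handle: " + meta.getName());
    }
  }
}
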
2024-11-13T00:29:20,614 DEBUG [Time-limited test {}] zookeeper.ZKUtil(111): regionserver:35015-0x1010ebaa2690002, quorum=127.0.0.1:62272, baseZNode=/hbase Set watcher on existing znode=/hbase/master
2024-11-13T00:29:20,616 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:35015-0x1010ebaa2690002, quorum=127.0.0.1:62272, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl
2024-11-13T00:29:20,616 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=35015
2024-11-13T00:29:20,618 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=35015
2024-11-13T00:29:20,618 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=35015
2024-11-13T00:29:20,620 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=35015
2024-11-13T00:29:20,620 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=35015
2024-11-13T00:29:20,626 INFO [RS:1;cb28580903fb:35015 {}] regionserver.HRegionServer(746): ClusterId : 4d801d72-d321-46f8-ab63-ff446a04ba64
2024-11-13T00:29:20,626 DEBUG [RS:1;cb28580903fb:35015 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing
2024-11-13T00:29:20,629 DEBUG [RS:1;cb28580903fb:35015 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized
2024-11-13T00:29:20,629 DEBUG [RS:1;cb28580903fb:35015 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing
2024-11-13T00:29:20,631 DEBUG [RS:1;cb28580903fb:35015 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized
2024-11-13T00:29:20,632 DEBUG [RS:1;cb28580903fb:35015 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6c027df9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=cb28580903fb/172.17.0.2:0
2024-11-13T00:29:20,651 DEBUG [RS:1;cb28580903fb:35015 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;cb28580903fb:35015
2024-11-13T00:29:20,651 INFO [RS:1;cb28580903fb:35015 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled
2024-11-13T00:29:20,651 INFO [RS:1;cb28580903fb:35015 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled
2024-11-13T00:29:20,651 DEBUG [RS:1;cb28580903fb:35015 {}] regionserver.HRegionServer(832): About to register with Master.
2024-11-13T00:29:20,652 INFO [RS:1;cb28580903fb:35015 {}] regionserver.HRegionServer(2659): reportForDuty to master=cb28580903fb,32917,1731457759225 with port=35015, startcode=1731457760593
2024-11-13T00:29:20,652 DEBUG [RS:1;cb28580903fb:35015 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false
2024-11-13T00:29:20,654 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50523, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.3 (auth:SIMPLE), service=RegionServerStatusService
2024-11-13T00:29:20,655 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=32917 {}] master.ServerManager(363): Checking decommissioned status of RegionServer cb28580903fb,35015,1731457760593
2024-11-13T00:29:20,655 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=32917 {}] master.ServerManager(517): Registering regionserver=cb28580903fb,35015,1731457760593
2024-11-13T00:29:20,657 DEBUG [RS:1;cb28580903fb:35015 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79
2024-11-13T00:29:20,657 DEBUG [RS:1;cb28580903fb:35015 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:34283
2024-11-13T00:29:20,657 DEBUG [RS:1;cb28580903fb:35015 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1
2024-11-13T00:29:20,660 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32917-0x1010ebaa2690000, quorum=127.0.0.1:62272, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs
2024-11-13T00:29:20,661 DEBUG [RS:1;cb28580903fb:35015 {}] zookeeper.ZKUtil(111): regionserver:35015-0x1010ebaa2690002, quorum=127.0.0.1:62272, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/cb28580903fb,35015,1731457760593
2024-11-13T00:29:20,661 WARN [RS:1;cb28580903fb:35015 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!)
2024-11-13T00:29:20,661 INFO [RS:1;cb28580903fb:35015 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider
2024-11-13T00:29:20,661 DEBUG [RS:1;cb28580903fb:35015 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,35015,1731457760593
2024-11-13T00:29:20,662 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [cb28580903fb,35015,1731457760593]
2024-11-13T00:29:20,668 INFO [RS:1;cb28580903fb:35015 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds
2024-11-13T00:29:20,672 INFO [RS:1;cb28580903fb:35015 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false
2024-11-13T00:29:20,674 INFO [RS:1;cb28580903fb:35015 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms
2024-11-13T00:29:20,674 INFO [RS:1;cb28580903fb:35015 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled.
2024-11-13T00:29:20,675 INFO [RS:1;cb28580903fb:35015 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S
2024-11-13T00:29:20,675 INFO [RS:1;cb28580903fb:35015 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec
2024-11-13T00:29:20,676 INFO [RS:1;cb28580903fb:35015 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled.
2024-11-13T00:29:20,676 DEBUG [RS:1;cb28580903fb:35015 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/cb28580903fb:0, corePoolSize=1, maxPoolSize=1
2024-11-13T00:29:20,676 DEBUG [RS:1;cb28580903fb:35015 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/cb28580903fb:0, corePoolSize=1, maxPoolSize=1
2024-11-13T00:29:20,676 DEBUG [RS:1;cb28580903fb:35015 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/cb28580903fb:0, corePoolSize=1, maxPoolSize=1
2024-11-13T00:29:20,676 DEBUG [RS:1;cb28580903fb:35015 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/cb28580903fb:0, corePoolSize=1, maxPoolSize=1
2024-11-13T00:29:20,676 DEBUG [RS:1;cb28580903fb:35015 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/cb28580903fb:0, corePoolSize=1, maxPoolSize=1
2024-11-13T00:29:20,676 DEBUG [RS:1;cb28580903fb:35015 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/cb28580903fb:0, corePoolSize=2, maxPoolSize=2
2024-11-13T00:29:20,676 DEBUG [RS:1;cb28580903fb:35015 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/cb28580903fb:0, corePoolSize=1, maxPoolSize=1
2024-11-13T00:29:20,676 DEBUG [RS:1;cb28580903fb:35015 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/cb28580903fb:0, corePoolSize=1, maxPoolSize=1
2024-11-13T00:29:20,676 DEBUG [RS:1;cb28580903fb:35015 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/cb28580903fb:0, corePoolSize=1, maxPoolSize=1
2024-11-13T00:29:20,676 DEBUG [RS:1;cb28580903fb:35015 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/cb28580903fb:0, corePoolSize=1, maxPoolSize=1
2024-11-13T00:29:20,676 DEBUG [RS:1;cb28580903fb:35015 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/cb28580903fb:0, corePoolSize=1, maxPoolSize=1
2024-11-13T00:29:20,676 DEBUG [RS:1;cb28580903fb:35015 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/cb28580903fb:0, corePoolSize=1, maxPoolSize=1
2024-11-13T00:29:20,676 DEBUG [RS:1;cb28580903fb:35015 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/cb28580903fb:0, corePoolSize=3, maxPoolSize=3
2024-11-13T00:29:20,677 DEBUG [RS:1;cb28580903fb:35015 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/cb28580903fb:0, corePoolSize=3, maxPoolSize=3
2024-11-13T00:29:20,677 INFO [RS:1;cb28580903fb:35015 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled.
2024-11-13T00:29:20,677 INFO [RS:1;cb28580903fb:35015 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled.
2024-11-13T00:29:20,677 INFO [RS:1;cb28580903fb:35015 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled.
2024-11-13T00:29:20,677 INFO [RS:1;cb28580903fb:35015 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled.
2024-11-13T00:29:20,677 INFO [RS:1;cb28580903fb:35015 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled.
2024-11-13T00:29:20,677 INFO [RS:1;cb28580903fb:35015 {}] hbase.ChoreService(168): Chore ScheduledChore name=cb28580903fb,35015,1731457760593-MobFileCleanerChore, period=86400, unit=SECONDS is enabled.
2024-11-13T00:29:20,698 INFO [RS:1;cb28580903fb:35015 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false
2024-11-13T00:29:20,699 INFO [RS:1;cb28580903fb:35015 {}] hbase.ChoreService(168): Chore ScheduledChore name=cb28580903fb,35015,1731457760593-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled.
2024-11-13T00:29:20,699 INFO [RS:1;cb28580903fb:35015 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled.
2024-11-13T00:29:20,699 INFO [RS:1;cb28580903fb:35015 {}] regionserver.Replication(171): cb28580903fb,35015,1731457760593 started
2024-11-13T00:29:20,720 INFO [RS:1;cb28580903fb:35015 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled.
2024-11-13T00:29:20,720 INFO [RS:1;cb28580903fb:35015 {}] regionserver.HRegionServer(1482): Serving as cb28580903fb,35015,1731457760593, RpcServer on cb28580903fb/172.17.0.2:35015, sessionid=0x1010ebaa2690002
2024-11-13T00:29:20,720 DEBUG [RS:1;cb28580903fb:35015 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting
2024-11-13T00:29:20,720 INFO [Time-limited test {}] hbase.HBaseTestingUtil(2882): Started new server=Thread[RS:1;cb28580903fb:35015,5,FailOnTimeoutGroup]
2024-11-13T00:29:20,721 DEBUG [RS:1;cb28580903fb:35015 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager cb28580903fb,35015,1731457760593
2024-11-13T00:29:20,721 DEBUG [RS:1;cb28580903fb:35015 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'cb28580903fb,35015,1731457760593'
2024-11-13T00:29:20,721 DEBUG [RS:1;cb28580903fb:35015 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort'
2024-11-13T00:29:20,721 INFO [Time-limited test {}] wal.TestLogRolling(207): Replication=2
2024-11-13T00:29:20,721 DEBUG [RS:1;cb28580903fb:35015 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired'
2024-11-13T00:29:20,721 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry
2024-11-13T00:29:20,722 DEBUG [RS:1;cb28580903fb:35015 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started
2024-11-13T00:29:20,722 DEBUG [RS:1;cb28580903fb:35015 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting
2024-11-13T00:29:20,722 DEBUG [RS:1;cb28580903fb:35015 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager cb28580903fb,35015,1731457760593
2024-11-13T00:29:20,722 DEBUG [RS:1;cb28580903fb:35015 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'cb28580903fb,35015,1731457760593'
2024-11-13T00:29:20,722 DEBUG [RS:1;cb28580903fb:35015 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort'
2024-11-13T00:29:20,722 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.AsyncConnectionImpl(321): The fetched master address is cb28580903fb,32917,1731457759225
2024-11-13T00:29:20,722 DEBUG [RS:1;cb28580903fb:35015 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired'
2024-11-13T00:29:20,723 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@cb496b3
2024-11-13T00:29:20,723 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false
2024-11-13T00:29:20,723 DEBUG [RS:1;cb28580903fb:35015 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started
2024-11-13T00:29:20,723 INFO [RS:1;cb28580903fb:35015 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled
2024-11-13T00:29:20,723 INFO [RS:1;cb28580903fb:35015 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager.
2024-11-13T00:29:20,725 INFO [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36582, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService
2024-11-13T00:29:20,725 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32917 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions.
2024-11-13T00:29:20,726 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32917 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing.
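
The two TableDescriptorChecker warnings above show that the table about to be created deliberately uses region-sizing values far below production defaults: "hbase.hregion.max.filesize" = 786432 (768 KB) and "hbase.hregion.memstore.flush.size" = 8192 (8 KB), which is how a log-rolling test forces frequent flushes and rolls. A hedged sketch of setting such values on a client Configuration follows; how the test actually injects them is not visible in this log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class SmallRegionConfigSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Values quoted verbatim by TableDescriptorChecker in the log above;
    // intentionally tiny so flushes/splits happen quickly. Never use in production.
    conf.setLong("hbase.hregion.max.filesize", 786432L);       // 768 KB
    conf.setLong("hbase.hregion.memstore.flush.size", 8192L);  // 8 KB
    System.out.println("max.filesize=" + conf.getLong("hbase.hregion.max.filesize", -1L));
  }
}
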
2024-11-13T00:29:20,726 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32917 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRollOnDatanodeDeath', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}
2024-11-13T00:29:20,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32917 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath
2024-11-13T00:29:20,729 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_PRE_OPERATION
2024-11-13T00:29:20,730 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-13T00:29:20,730 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32917 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRollOnDatanodeDeath" procId is: 4
2024-11-13T00:29:20,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32917 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4
2024-11-13T00:29:20,731 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_WRITE_FS_LAYOUT
2024-11-13T00:29:20,743 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39667 is added to blk_1073741835_1011 (size=393)
2024-11-13T00:29:20,743 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35189 is added to blk_1073741835_1011 (size=393)
2024-11-13T00:29:20,745 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => dc52a27dfbcfebe0e13c82666f5a1780, NAME => 'TestLogRolling-testLogRollOnDatanodeDeath,,1731457760725.dc52a27dfbcfebe0e13c82666f5a1780.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRollOnDatanodeDeath', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79
2024-11-13T00:29:20,753 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39667 is added to blk_1073741836_1012 (size=76)
2024-11-13T00:29:20,754 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35189 is added to blk_1073741836_1012 (size=76)
2024-11-13T00:29:20,755 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnDatanodeDeath,,1731457760725.dc52a27dfbcfebe0e13c82666f5a1780.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-11-13T00:29:20,755 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1722): Closing dc52a27dfbcfebe0e13c82666f5a1780, disabling compactions & flushes
2024-11-13T00:29:20,755 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnDatanodeDeath,,1731457760725.dc52a27dfbcfebe0e13c82666f5a1780.
2024-11-13T00:29:20,755 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1731457760725.dc52a27dfbcfebe0e13c82666f5a1780.
2024-11-13T00:29:20,755 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1731457760725.dc52a27dfbcfebe0e13c82666f5a1780. after waiting 0 ms
2024-11-13T00:29:20,755 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnDatanodeDeath,,1731457760725.dc52a27dfbcfebe0e13c82666f5a1780.
2024-11-13T00:29:20,755 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1731457760725.dc52a27dfbcfebe0e13c82666f5a1780.
2024-11-13T00:29:20,755 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1676): Region close journal for dc52a27dfbcfebe0e13c82666f5a1780: Waiting for close lock at 1731457760755Disabling compacts and flushes for region at 1731457760755Disabling writes for close at 1731457760755Writing region close event to WAL at 1731457760755Closed at 1731457760755
2024-11-13T00:29:20,757 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_ADD_TO_META
2024-11-13T00:29:20,757 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRollOnDatanodeDeath,,1731457760725.dc52a27dfbcfebe0e13c82666f5a1780.","families":{"info":[{"qualifier":"regioninfo","vlen":75,"tag":[],"timestamp":"1731457760757"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731457760757"}]},"ts":"1731457760757"}
2024-11-13T00:29:20,760 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta.
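
The create request logged above ('TestLogRolling-testLogRollOnDatanodeDeath' with a single 'info' column family) is a plain Admin-API table creation that the master turns into CreateTableProcedure pid=4. The exact helper the test uses is not visible in this log; a minimal, hedged equivalent using the public client API, assuming a Connection obtained as in the earlier sketch, would be:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class CreateTableSketch {
  // Creates a table with one 'info' column family, mirroring the descriptor in the log.
  static void createTestTable(Connection connection) throws Exception {
    TableDescriptor desc = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("TestLogRolling-testLogRollOnDatanodeDeath"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
        .build();
    try (Admin admin = connection.getAdmin()) {
      admin.createTable(desc); // this call is what drives the CreateTableProcedure seen above
    }
  }
}
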
2024-11-13T00:29:20,761 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_ASSIGN_REGIONS
2024-11-13T00:29:20,762 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnDatanodeDeath","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731457760761"}]},"ts":"1731457760761"}
2024-11-13T00:29:20,764 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnDatanodeDeath, state=ENABLING in hbase:meta
2024-11-13T00:29:20,764 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=dc52a27dfbcfebe0e13c82666f5a1780, ASSIGN}]
2024-11-13T00:29:20,766 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=dc52a27dfbcfebe0e13c82666f5a1780, ASSIGN
2024-11-13T00:29:20,767 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=dc52a27dfbcfebe0e13c82666f5a1780, ASSIGN; state=OFFLINE, location=cb28580903fb,41233,1731457759323; forceNewPlan=false, retain=false
2024-11-13T00:29:20,826 INFO [RS:1;cb28580903fb:35015 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=cb28580903fb%2C35015%2C1731457760593, suffix=, logDir=hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,35015,1731457760593, archiveDir=hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/oldWALs, maxLogs=32
2024-11-13T00:29:20,827 INFO [RS:1;cb28580903fb:35015 {}] monitor.StreamSlowMonitor(122): New stream slow monitor cb28580903fb%2C35015%2C1731457760593.1731457760827
2024-11-13T00:29:20,837 INFO [RS:1;cb28580903fb:35015 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,35015,1731457760593/cb28580903fb%2C35015%2C1731457760593.1731457760827
2024-11-13T00:29:20,849 DEBUG [RS:1;cb28580903fb:35015 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34993:34993),(127.0.0.1/127.0.0.1:43759:43759)]
2024-11-13T00:29:20,918 INFO [cb28580903fb:32917 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment.
2024-11-13T00:29:20,918 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=dc52a27dfbcfebe0e13c82666f5a1780, regionState=OPENING, regionLocation=cb28580903fb,41233,1731457759323
2024-11-13T00:29:20,922 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-8-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=dc52a27dfbcfebe0e13c82666f5a1780, ASSIGN because future has completed
2024-11-13T00:29:20,923 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure dc52a27dfbcfebe0e13c82666f5a1780, server=cb28580903fb,41233,1731457759323}]
2024-11-13T00:29:21,083 INFO [RS_OPEN_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRollOnDatanodeDeath,,1731457760725.dc52a27dfbcfebe0e13c82666f5a1780.
2024-11-13T00:29:21,084 DEBUG [RS_OPEN_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => dc52a27dfbcfebe0e13c82666f5a1780, NAME => 'TestLogRolling-testLogRollOnDatanodeDeath,,1731457760725.dc52a27dfbcfebe0e13c82666f5a1780.', STARTKEY => '', ENDKEY => ''}
2024-11-13T00:29:21,084 DEBUG [RS_OPEN_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRollOnDatanodeDeath dc52a27dfbcfebe0e13c82666f5a1780
2024-11-13T00:29:21,085 DEBUG [RS_OPEN_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnDatanodeDeath,,1731457760725.dc52a27dfbcfebe0e13c82666f5a1780.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-11-13T00:29:21,085 DEBUG [RS_OPEN_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for dc52a27dfbcfebe0e13c82666f5a1780
2024-11-13T00:29:21,085 DEBUG [RS_OPEN_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for dc52a27dfbcfebe0e13c82666f5a1780
2024-11-13T00:29:21,091 INFO [StoreOpener-dc52a27dfbcfebe0e13c82666f5a1780-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region dc52a27dfbcfebe0e13c82666f5a1780
2024-11-13T00:29:21,092 INFO [StoreOpener-dc52a27dfbcfebe0e13c82666f5a1780-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region dc52a27dfbcfebe0e13c82666f5a1780 columnFamilyName info
2024-11-13T00:29:21,092 DEBUG [StoreOpener-dc52a27dfbcfebe0e13c82666f5a1780-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-13T00:29:21,093 INFO [StoreOpener-dc52a27dfbcfebe0e13c82666f5a1780-1 {}] regionserver.HStore(327): Store=dc52a27dfbcfebe0e13c82666f5a1780/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-11-13T00:29:21,093 DEBUG [RS_OPEN_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for dc52a27dfbcfebe0e13c82666f5a1780
2024-11-13T00:29:21,094 DEBUG [RS_OPEN_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/data/default/TestLogRolling-testLogRollOnDatanodeDeath/dc52a27dfbcfebe0e13c82666f5a1780
2024-11-13T00:29:21,094 DEBUG [RS_OPEN_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/data/default/TestLogRolling-testLogRollOnDatanodeDeath/dc52a27dfbcfebe0e13c82666f5a1780
2024-11-13T00:29:21,095 DEBUG [RS_OPEN_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for dc52a27dfbcfebe0e13c82666f5a1780
2024-11-13T00:29:21,095 DEBUG [RS_OPEN_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for dc52a27dfbcfebe0e13c82666f5a1780
2024-11-13T00:29:21,096 DEBUG [RS_OPEN_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for dc52a27dfbcfebe0e13c82666f5a1780
2024-11-13T00:29:21,101 DEBUG [RS_OPEN_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/data/default/TestLogRolling-testLogRollOnDatanodeDeath/dc52a27dfbcfebe0e13c82666f5a1780/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1
2024-11-13T00:29:21,101 INFO [RS_OPEN_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened dc52a27dfbcfebe0e13c82666f5a1780; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=711169, jitterRate=-0.09570187330245972}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1}
2024-11-13T00:29:21,102 DEBUG [RS_OPEN_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for dc52a27dfbcfebe0e13c82666f5a1780
2024-11-13T00:29:21,102 DEBUG [RS_OPEN_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for dc52a27dfbcfebe0e13c82666f5a1780: Running coprocessor pre-open hook at 1731457761085Writing region info on filesystem at 1731457761085Initializing all the Stores at 1731457761088 (+3 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731457761088Cleaning up temporary data from old regions at 1731457761095 (+7 ms)Running coprocessor post-open hooks at 1731457761102 (+7 ms)Region opened successfully at 1731457761102
2024-11-13T00:29:21,104 INFO [RS_OPEN_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRollOnDatanodeDeath,,1731457760725.dc52a27dfbcfebe0e13c82666f5a1780., pid=6, masterSystemTime=1731457761078
2024-11-13T00:29:21,107 DEBUG [RS_OPEN_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRollOnDatanodeDeath,,1731457760725.dc52a27dfbcfebe0e13c82666f5a1780.
2024-11-13T00:29:21,107 INFO [RS_OPEN_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRollOnDatanodeDeath,,1731457760725.dc52a27dfbcfebe0e13c82666f5a1780.
2024-11-13T00:29:21,108 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=dc52a27dfbcfebe0e13c82666f5a1780, regionState=OPEN, openSeqNum=2, regionLocation=cb28580903fb,41233,1731457759323
2024-11-13T00:29:21,112 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-8-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure dc52a27dfbcfebe0e13c82666f5a1780, server=cb28580903fb,41233,1731457759323 because future has completed
2024-11-13T00:29:21,120 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5
2024-11-13T00:29:21,120 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure dc52a27dfbcfebe0e13c82666f5a1780, server=cb28580903fb,41233,1731457759323 in 193 msec
2024-11-13T00:29:21,123 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4
2024-11-13T00:29:21,124 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=dc52a27dfbcfebe0e13c82666f5a1780, ASSIGN in 356 msec
2024-11-13T00:29:21,125 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_UPDATE_DESC_CACHE
2024-11-13T00:29:21,126 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnDatanodeDeath","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731457761125"}]},"ts":"1731457761125"}
2024-11-13T00:29:21,131 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnDatanodeDeath, state=ENABLED in hbase:meta
2024-11-13T00:29:21,132 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_POST_OPERATION
2024-11-13T00:29:21,135 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath in 407 msec
2024-11-13T00:29:25,580 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties
2024-11-13T00:29:25,582 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-13T00:29:25,600 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-13T00:29:25,603 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-13T00:29:25,603 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-13T00:29:25,700 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRollOnDatanodeDeath'
2024-11-13T00:29:29,509 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta
2024-11-13T00:29:29,509 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer
2024-11-13T00:29:29,510 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath
2024-11-13T00:29:29,510 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath Metrics about Tables on a single HBase RegionServer
2024-11-13T00:29:29,511 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint
2024-11-13T00:29:29,511 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers
2024-11-13T00:29:30,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32917 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4
2024-11-13T00:29:30,776 INFO [RPCClient-NioEventLoopGroup-4-11 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRollOnDatanodeDeath completed
2024-11-13T00:29:30,776 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRollOnDatanodeDeath,, stopping at row=TestLogRolling-testLogRollOnDatanodeDeath ,, for max=2147483647 with caching=100
2024-11-13T00:29:30,779 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRollOnDatanodeDeath
2024-11-13T00:29:30,779 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRollOnDatanodeDeath,,1731457760725.dc52a27dfbcfebe0e13c82666f5a1780.
2024-11-13T00:29:30,792 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-11-13T00:29:30,796 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-11-13T00:29:30,796 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-11-13T00:29:30,796 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-11-13T00:29:30,796 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms
2024-11-13T00:29:30,797 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@48400c5f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bed78662-d1c4-0e5c-044c-1c6cedef83ec/hadoop.log.dir/,AVAILABLE}
2024-11-13T00:29:30,797 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2378f0ec{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-11-13T00:29:30,913 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2ab7c419{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bed78662-d1c4-0e5c-044c-1c6cedef83ec/java.io.tmpdir/jetty-localhost-37643-hadoop-hdfs-3_4_1-tests_jar-_-any-4363935707261538210/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-13T00:29:30,913 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@35f8237e{HTTP/1.1, (http/1.1)}{localhost:37643}
2024-11-13T00:29:30,914 INFO [Time-limited test {}] server.Server(415): Started @116762ms
2024-11-13T00:29:30,915 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
2024-11-13T00:29:30,952 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-11-13T00:29:30,955 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-11-13T00:29:30,957 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-11-13T00:29:30,957 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-11-13T00:29:30,957 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms
2024-11-13T00:29:30,957 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3a3ddcf2{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bed78662-d1c4-0e5c-044c-1c6cedef83ec/hadoop.log.dir/,AVAILABLE}
2024-11-13T00:29:30,957 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6ee7700a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-11-13T00:29:31,011 WARN [Thread-827 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bed78662-d1c4-0e5c-044c-1c6cedef83ec/cluster_57ab5bb0-53ef-f515-2c82-6c13ee5c6aca/data/data5/current/BP-155329190-172.17.0.2-1731457758228/current, will proceed with Du for space computation calculation,
2024-11-13T00:29:31,011 WARN [Thread-828 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bed78662-d1c4-0e5c-044c-1c6cedef83ec/cluster_57ab5bb0-53ef-f515-2c82-6c13ee5c6aca/data/data6/current/BP-155329190-172.17.0.2-1731457758228/current, will proceed with Du for space computation calculation,
2024-11-13T00:29:31,031 WARN [Thread-807 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec.
Assuming default value of -1 2024-11-13T00:29:31,034 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xec5824facbe98662 with lease ID 0x8693d6b9cb094358: Processing first storage report for DS-ce41437c-0062-4f42-ab04-40d754994e31 from datanode DatanodeRegistration(127.0.0.1:33729, datanodeUuid=d265e119-ea52-4573-b71a-d3e9d15f102d, infoPort=40735, infoSecurePort=0, ipcPort=36497, storageInfo=lv=-57;cid=testClusterID;nsid=1847167993;c=1731457758228) 2024-11-13T00:29:31,034 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xec5824facbe98662 with lease ID 0x8693d6b9cb094358: from storage DS-ce41437c-0062-4f42-ab04-40d754994e31 node DatanodeRegistration(127.0.0.1:33729, datanodeUuid=d265e119-ea52-4573-b71a-d3e9d15f102d, infoPort=40735, infoSecurePort=0, ipcPort=36497, storageInfo=lv=-57;cid=testClusterID;nsid=1847167993;c=1731457758228), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-13T00:29:31,034 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xec5824facbe98662 with lease ID 0x8693d6b9cb094358: Processing first storage report for DS-77cfbf58-da74-4e78-b674-d6e679634f85 from datanode DatanodeRegistration(127.0.0.1:33729, datanodeUuid=d265e119-ea52-4573-b71a-d3e9d15f102d, infoPort=40735, infoSecurePort=0, ipcPort=36497, storageInfo=lv=-57;cid=testClusterID;nsid=1847167993;c=1731457758228) 2024-11-13T00:29:31,034 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xec5824facbe98662 with lease ID 0x8693d6b9cb094358: from storage DS-77cfbf58-da74-4e78-b674-d6e679634f85 node DatanodeRegistration(127.0.0.1:33729, datanodeUuid=d265e119-ea52-4573-b71a-d3e9d15f102d, infoPort=40735, infoSecurePort=0, ipcPort=36497, storageInfo=lv=-57;cid=testClusterID;nsid=1847167993;c=1731457758228), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-13T00:29:31,076 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@13a31f2a{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bed78662-d1c4-0e5c-044c-1c6cedef83ec/java.io.tmpdir/jetty-localhost-39363-hadoop-hdfs-3_4_1-tests_jar-_-any-5010714164479109370/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-13T00:29:31,076 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@37073a3f{HTTP/1.1, (http/1.1)}{localhost:39363} 2024-11-13T00:29:31,076 INFO [Time-limited test {}] server.Server(415): Started @116924ms 2024-11-13T00:29:31,078 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-13T00:29:31,129 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-13T00:29:31,134 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-13T00:29:31,144 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-13T00:29:31,145 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-13T00:29:31,145 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-13T00:29:31,147 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2d0c94cc{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bed78662-d1c4-0e5c-044c-1c6cedef83ec/hadoop.log.dir/,AVAILABLE} 2024-11-13T00:29:31,148 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4cb48605{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-13T00:29:31,195 WARN [Thread-862 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bed78662-d1c4-0e5c-044c-1c6cedef83ec/cluster_57ab5bb0-53ef-f515-2c82-6c13ee5c6aca/data/data7/current/BP-155329190-172.17.0.2-1731457758228/current, will proceed with Du for space computation calculation, 2024-11-13T00:29:31,195 WARN [Thread-863 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bed78662-d1c4-0e5c-044c-1c6cedef83ec/cluster_57ab5bb0-53ef-f515-2c82-6c13ee5c6aca/data/data8/current/BP-155329190-172.17.0.2-1731457758228/current, will proceed with Du for space computation calculation, 2024-11-13T00:29:31,219 WARN [Thread-842 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-13T00:29:31,222 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xd21673a1d7a3175b with lease ID 0x8693d6b9cb094359: Processing first storage report for DS-e39e4d54-cf28-41f3-99eb-0e53cf82d9be from datanode DatanodeRegistration(127.0.0.1:40983, datanodeUuid=4a04d4bc-f330-4e79-b94b-74f0b35167bf, infoPort=36359, infoSecurePort=0, ipcPort=39275, storageInfo=lv=-57;cid=testClusterID;nsid=1847167993;c=1731457758228) 2024-11-13T00:29:31,222 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xd21673a1d7a3175b with lease ID 0x8693d6b9cb094359: from storage DS-e39e4d54-cf28-41f3-99eb-0e53cf82d9be node DatanodeRegistration(127.0.0.1:40983, datanodeUuid=4a04d4bc-f330-4e79-b94b-74f0b35167bf, infoPort=36359, infoSecurePort=0, ipcPort=39275, storageInfo=lv=-57;cid=testClusterID;nsid=1847167993;c=1731457758228), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-13T00:29:31,222 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xd21673a1d7a3175b with lease ID 0x8693d6b9cb094359: Processing first storage report for DS-ae37a78f-23ca-4d7a-88bd-ed409678bc7a from datanode DatanodeRegistration(127.0.0.1:40983, datanodeUuid=4a04d4bc-f330-4e79-b94b-74f0b35167bf, infoPort=36359, infoSecurePort=0, ipcPort=39275, storageInfo=lv=-57;cid=testClusterID;nsid=1847167993;c=1731457758228) 2024-11-13T00:29:31,222 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xd21673a1d7a3175b with lease ID 0x8693d6b9cb094359: from storage DS-ae37a78f-23ca-4d7a-88bd-ed409678bc7a node DatanodeRegistration(127.0.0.1:40983, datanodeUuid=4a04d4bc-f330-4e79-b94b-74f0b35167bf, infoPort=36359, infoSecurePort=0, ipcPort=39275, storageInfo=lv=-57;cid=testClusterID;nsid=1847167993;c=1731457758228), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-13T00:29:31,271 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@77de3d88{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bed78662-d1c4-0e5c-044c-1c6cedef83ec/java.io.tmpdir/jetty-localhost-46285-hadoop-hdfs-3_4_1-tests_jar-_-any-13310310145477578245/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-13T00:29:31,272 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@55e6039a{HTTP/1.1, (http/1.1)}{localhost:46285} 2024-11-13T00:29:31,272 INFO [Time-limited test {}] server.Server(415): Started @117120ms 2024-11-13T00:29:31,273 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-11-13T00:29:31,378 WARN [Thread-888 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bed78662-d1c4-0e5c-044c-1c6cedef83ec/cluster_57ab5bb0-53ef-f515-2c82-6c13ee5c6aca/data/data9/current/BP-155329190-172.17.0.2-1731457758228/current, will proceed with Du for space computation calculation, 2024-11-13T00:29:31,378 WARN [Thread-889 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bed78662-d1c4-0e5c-044c-1c6cedef83ec/cluster_57ab5bb0-53ef-f515-2c82-6c13ee5c6aca/data/data10/current/BP-155329190-172.17.0.2-1731457758228/current, will proceed with Du for space computation calculation, 2024-11-13T00:29:31,395 WARN [Thread-877 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-13T00:29:31,397 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x7e5b8c8e1123e8d8 with lease ID 0x8693d6b9cb09435a: Processing first storage report for DS-e751e6f0-c69c-4ed5-91de-2138aa6a5d68 from datanode DatanodeRegistration(127.0.0.1:35479, datanodeUuid=71bcd43b-d601-405d-8eb0-235245acf278, infoPort=34369, infoSecurePort=0, ipcPort=45923, storageInfo=lv=-57;cid=testClusterID;nsid=1847167993;c=1731457758228) 2024-11-13T00:29:31,398 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x7e5b8c8e1123e8d8 with lease ID 0x8693d6b9cb09435a: from storage DS-e751e6f0-c69c-4ed5-91de-2138aa6a5d68 node DatanodeRegistration(127.0.0.1:35479, datanodeUuid=71bcd43b-d601-405d-8eb0-235245acf278, infoPort=34369, infoSecurePort=0, ipcPort=45923, storageInfo=lv=-57;cid=testClusterID;nsid=1847167993;c=1731457758228), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-13T00:29:31,398 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x7e5b8c8e1123e8d8 with lease ID 0x8693d6b9cb09435a: Processing first storage report for DS-532d2603-04ee-4376-9a0b-1c024386d8c8 from datanode DatanodeRegistration(127.0.0.1:35479, datanodeUuid=71bcd43b-d601-405d-8eb0-235245acf278, infoPort=34369, infoSecurePort=0, ipcPort=45923, storageInfo=lv=-57;cid=testClusterID;nsid=1847167993;c=1731457758228) 2024-11-13T00:29:31,398 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x7e5b8c8e1123e8d8 with lease ID 0x8693d6b9cb09435a: from storage DS-532d2603-04ee-4376-9a0b-1c024386d8c8 node DatanodeRegistration(127.0.0.1:35479, datanodeUuid=71bcd43b-d601-405d-8eb0-235245acf278, infoPort=34369, infoSecurePort=0, ipcPort=45923, storageInfo=lv=-57;cid=testClusterID;nsid=1847167993;c=1731457758228), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-13T00:29:31,494 WARN [ResponseProcessor for block BP-155329190-172.17.0.2-1731457758228:blk_1073741834_1010 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-155329190-172.17.0.2-1731457758228:blk_1073741834_1010 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T00:29:31,494 WARN [ResponseProcessor for block BP-155329190-172.17.0.2-1731457758228:blk_1073741833_1009 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-155329190-172.17.0.2-1731457758228:blk_1073741833_1009 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T00:29:31,494 WARN [ResponseProcessor for block BP-155329190-172.17.0.2-1731457758228:blk_1073741830_1006 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-155329190-172.17.0.2-1731457758228:blk_1073741830_1006 java.io.IOException: Bad response ERROR for BP-155329190-172.17.0.2-1731457758228:blk_1073741830_1006 from datanode DatanodeInfoWithStorage[127.0.0.1:35189,DS-0a2d533a-4bec-41d8-9594-717d0345b537,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T00:29:31,494 WARN [ResponseProcessor for block BP-155329190-172.17.0.2-1731457758228:blk_1073741837_1013 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-155329190-172.17.0.2-1731457758228:blk_1073741837_1013 java.io.IOException: Bad response ERROR for BP-155329190-172.17.0.2-1731457758228:blk_1073741837_1013 from datanode DatanodeInfoWithStorage[127.0.0.1:35189,DS-0a2d533a-4bec-41d8-9594-717d0345b537,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T00:29:31,495 WARN [DataStreamer for file /user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,35015,1731457760593/cb28580903fb%2C35015%2C1731457760593.1731457760827 block BP-155329190-172.17.0.2-1731457758228:blk_1073741837_1013 {}] hdfs.DataStreamer(1731): Error Recovery for BP-155329190-172.17.0.2-1731457758228:blk_1073741837_1013 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39667,DS-41660bf4-b5b7-4b75-9656-e64806834d3c,DISK], DatanodeInfoWithStorage[127.0.0.1:35189,DS-0a2d533a-4bec-41d8-9594-717d0345b537,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:35189,DS-0a2d533a-4bec-41d8-9594-717d0345b537,DISK]) is bad. 2024-11-13T00:29:31,494 WARN [DataStreamer for file /user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,41233,1731457759323/cb28580903fb%2C41233%2C1731457759323.meta.1731457760430.meta block BP-155329190-172.17.0.2-1731457758228:blk_1073741834_1010 {}] hdfs.DataStreamer(1731): Error Recovery for BP-155329190-172.17.0.2-1731457758228:blk_1073741834_1010 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35189,DS-0a2d533a-4bec-41d8-9594-717d0345b537,DISK], DatanodeInfoWithStorage[127.0.0.1:39667,DS-41660bf4-b5b7-4b75-9656-e64806834d3c,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:35189,DS-0a2d533a-4bec-41d8-9594-717d0345b537,DISK]) is bad. 
2024-11-13T00:29:31,495 WARN [DataStreamer for file /user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,41233,1731457759323/cb28580903fb%2C41233%2C1731457759323.1731457759850 block BP-155329190-172.17.0.2-1731457758228:blk_1073741833_1009 {}] hdfs.DataStreamer(1731): Error Recovery for BP-155329190-172.17.0.2-1731457758228:blk_1073741833_1009 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35189,DS-0a2d533a-4bec-41d8-9594-717d0345b537,DISK], DatanodeInfoWithStorage[127.0.0.1:39667,DS-41660bf4-b5b7-4b75-9656-e64806834d3c,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:35189,DS-0a2d533a-4bec-41d8-9594-717d0345b537,DISK]) is bad. 2024-11-13T00:29:31,495 WARN [PacketResponder: BP-155329190-172.17.0.2-1731457758228:blk_1073741837_1013, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:35189] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T00:29:31,495 WARN [DataStreamer for file /user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/MasterData/WALs/cb28580903fb,32917,1731457759225/cb28580903fb%2C32917%2C1731457759225.1731457759520 block BP-155329190-172.17.0.2-1731457758228:blk_1073741830_1006 {}] hdfs.DataStreamer(1731): Error Recovery for BP-155329190-172.17.0.2-1731457758228:blk_1073741830_1006 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39667,DS-41660bf4-b5b7-4b75-9656-e64806834d3c,DISK], DatanodeInfoWithStorage[127.0.0.1:35189,DS-0a2d533a-4bec-41d8-9594-717d0345b537,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:35189,DS-0a2d533a-4bec-41d8-9594-717d0345b537,DISK]) is bad. 
2024-11-13T00:29:31,496 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-2041038606_22 at /127.0.0.1:39878 [Receiving block BP-155329190-172.17.0.2-1731457758228:blk_1073741837_1013] {}] datanode.DataXceiver(331): 127.0.0.1:39667:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:39878 dst: /127.0.0.1:39667 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T00:29:31,496 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_202239816_22 at /127.0.0.1:48152 [Receiving block BP-155329190-172.17.0.2-1731457758228:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:35189:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:48152 dst: /127.0.0.1:35189 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T00:29:31,496 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_202239816_22 at /127.0.0.1:54938 [Receiving block BP-155329190-172.17.0.2-1731457758228:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:39667:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54938 dst: /127.0.0.1:39667 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T00:29:31,496 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-2041038606_22 at /127.0.0.1:53734 [Receiving block BP-155329190-172.17.0.2-1731457758228:blk_1073741837_1013] {}] datanode.DataXceiver(331): 127.0.0.1:35189:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:53734 dst: /127.0.0.1:35189 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] 
at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T00:29:31,496 WARN [PacketResponder: BP-155329190-172.17.0.2-1731457758228:blk_1073741830_1006, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:35189] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Broken pipe at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T00:29:31,497 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_202239816_22 at /127.0.0.1:48148 [Receiving block BP-155329190-172.17.0.2-1731457758228:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:35189:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:48148 dst: /127.0.0.1:35189 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] 
at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T00:29:31,497 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_10793125_22 at /127.0.0.1:54910 [Receiving block BP-155329190-172.17.0.2-1731457758228:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:39667:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54910 dst: /127.0.0.1:39667 java.net.SocketException: Connection reset at sun.nio.ch.SocketChannelImpl.throwConnectionReset(SocketChannelImpl.java:394) ~[?:?] at sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:426) ~[?:?] at org.apache.hadoop.net.SocketInputStream$Reader.performIO(SocketInputStream.java:57) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T00:29:31,497 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_202239816_22 at /127.0.0.1:54924 [Receiving block BP-155329190-172.17.0.2-1731457758228:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:39667:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54924 dst: /127.0.0.1:39667 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T00:29:31,497 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_10793125_22 at /127.0.0.1:48108 [Receiving block BP-155329190-172.17.0.2-1731457758228:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:35189:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:48108 dst: /127.0.0.1:35189 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T00:29:31,558 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3893f69{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-13T00:29:31,558 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@206fa4ee{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-13T00:29:31,559 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-13T00:29:31,559 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1c9fa828{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-13T00:29:31,559 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@30985369{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bed78662-d1c4-0e5c-044c-1c6cedef83ec/hadoop.log.dir/,STOPPED} 2024-11-13T00:29:31,565 WARN [BP-155329190-172.17.0.2-1731457758228 heartbeating to localhost/127.0.0.1:34283 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-13T00:29:31,565 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-13T00:29:31,565 WARN [BP-155329190-172.17.0.2-1731457758228 heartbeating to localhost/127.0.0.1:34283 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-155329190-172.17.0.2-1731457758228 (Datanode Uuid 15755521-cc20-4ae7-8d60-99b27f2bc8a1) service to localhost/127.0.0.1:34283 2024-11-13T00:29:31,565 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-13T00:29:31,566 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bed78662-d1c4-0e5c-044c-1c6cedef83ec/cluster_57ab5bb0-53ef-f515-2c82-6c13ee5c6aca/data/data3/current/BP-155329190-172.17.0.2-1731457758228 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-13T00:29:31,566 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bed78662-d1c4-0e5c-044c-1c6cedef83ec/cluster_57ab5bb0-53ef-f515-2c82-6c13ee5c6aca/data/data4/current/BP-155329190-172.17.0.2-1731457758228 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-13T00:29:31,567 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-13T00:29:31,574 WARN [DataStreamer for file /user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,41233,1731457759323/cb28580903fb%2C41233%2C1731457759323.meta.1731457760430.meta block BP-155329190-172.17.0.2-1731457758228:blk_1073741834_1010 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741834_1010 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T00:29:31,574 WARN [DataStreamer for file /user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,41233,1731457759323/cb28580903fb%2C41233%2C1731457759323.1731457759850 block BP-155329190-172.17.0.2-1731457758228:blk_1073741833_1009 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741833_1009 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T00:29:31,580 WARN [ResponseProcessor for block BP-155329190-172.17.0.2-1731457758228:blk_1073741830_1014 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-155329190-172.17.0.2-1731457758228:blk_1073741830_1014 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T00:29:31,581 ERROR [org.apache.hadoop.hdfs.server.datanode.DataXceiver@3ea81664 {}] datanode.DataXceiver(331): 127.0.0.1:39667:DataXceiver error processing unknown operation src: /127.0.0.1:35018 dst: /127.0.0.1:39667 java.io.IOException: Server closed. at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.addPeer(DataXceiverServer.java:334) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:232) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T00:29:31,581 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_10793125_22 at /127.0.0.1:35006 [Receiving block BP-155329190-172.17.0.2-1731457758228:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:39667:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:35006 dst: /127.0.0.1:39667 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T00:29:31,581 WARN [DataStreamer for file /user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,35015,1731457760593/cb28580903fb%2C35015%2C1731457760593.1731457760827 block BP-155329190-172.17.0.2-1731457758228:blk_1073741837_1013 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741837_1013 java.net.SocketException: Connection reset at sun.nio.ch.SocketChannelImpl.throwConnectionReset(SocketChannelImpl.java:394) ~[?:?] at sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:426) ~[?:?] at org.apache.hadoop.net.SocketInputStream$Reader.performIO(SocketInputStream.java:57) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:118) ~[hadoop-common-3.4.1.jar:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:527) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1931) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-13T00:29:31,588 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@24d45158{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-13T00:29:31,588 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@76afaf39{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-13T00:29:31,589 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-13T00:29:31,589 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6cb8e75e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-13T00:29:31,589 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@778fdefb{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bed78662-d1c4-0e5c-044c-1c6cedef83ec/hadoop.log.dir/,STOPPED} 2024-11-13T00:29:31,591 WARN [BP-155329190-172.17.0.2-1731457758228 heartbeating to localhost/127.0.0.1:34283 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-13T00:29:31,591 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-13T00:29:31,592 WARN [BP-155329190-172.17.0.2-1731457758228 heartbeating to localhost/127.0.0.1:34283 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-155329190-172.17.0.2-1731457758228 (Datanode Uuid 7111f6bf-a4eb-45ef-b0e8-709db178ad7d) service to localhost/127.0.0.1:34283 2024-11-13T00:29:31,592 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-13T00:29:31,592 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bed78662-d1c4-0e5c-044c-1c6cedef83ec/cluster_57ab5bb0-53ef-f515-2c82-6c13ee5c6aca/data/data1/current/BP-155329190-172.17.0.2-1731457758228 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-13T00:29:31,592 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bed78662-d1c4-0e5c-044c-1c6cedef83ec/cluster_57ab5bb0-53ef-f515-2c82-6c13ee5c6aca/data/data2/current/BP-155329190-172.17.0.2-1731457758228 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-13T00:29:31,593 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-13T00:29:31,597 DEBUG [RPCClient-NioEventLoopGroup-4-10 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRollOnDatanodeDeath', row='row0002', locateType=CURRENT is [region=TestLogRolling-testLogRollOnDatanodeDeath,,1731457760725.dc52a27dfbcfebe0e13c82666f5a1780., hostname=cb28580903fb,41233,1731457759323, seqNum=2] 2024-11-13T00:29:31,598 ERROR [FSHLog-0-hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79-prefix:cb28580903fb,41233,1731457759323 {}] wal.AbstractFSWAL(1838): appendAndSync throws 
IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39667,DS-41660bf4-b5b7-4b75-9656-e64806834d3c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T00:29:31,599 WARN [FSHLog-0-hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79-prefix:cb28580903fb,41233,1731457759323 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39667,DS-41660bf4-b5b7-4b75-9656-e64806834d3c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T00:29:31,599 INFO [regionserver/cb28580903fb:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39667,DS-41660bf4-b5b7-4b75-9656-e64806834d3c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T00:29:31,599 DEBUG [regionserver/cb28580903fb:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog cb28580903fb%2C41233%2C1731457759323:(num 1731457759850) roll requested 2024-11-13T00:29:31,600 INFO [regionserver/cb28580903fb:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor cb28580903fb%2C41233%2C1731457759323.1731457771599 2024-11-13T00:29:31,606 WARN [Thread-903 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741838_1018 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T00:29:31,607 WARN [Thread-903 {}] hdfs.DataStreamer(1731): Error Recovery for BP-155329190-172.17.0.2-1731457758228:blk_1073741838_1018 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35189,DS-0a2d533a-4bec-41d8-9594-717d0345b537,DISK], DatanodeInfoWithStorage[127.0.0.1:40983,DS-e39e4d54-cf28-41f3-99eb-0e53cf82d9be,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:35189,DS-0a2d533a-4bec-41d8-9594-717d0345b537,DISK]) is bad. 2024-11-13T00:29:31,607 WARN [Thread-903 {}] hdfs.DataStreamer(1850): Abandoning BP-155329190-172.17.0.2-1731457758228:blk_1073741838_1018 2024-11-13T00:29:31,610 WARN [Thread-903 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35189,DS-0a2d533a-4bec-41d8-9594-717d0345b537,DISK] 2024-11-13T00:29:31,614 WARN [Thread-903 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741839_1019 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T00:29:31,614 WARN [Thread-903 {}] hdfs.DataStreamer(1731): Error Recovery for BP-155329190-172.17.0.2-1731457758228:blk_1073741839_1019 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39667,DS-41660bf4-b5b7-4b75-9656-e64806834d3c,DISK], DatanodeInfoWithStorage[127.0.0.1:40983,DS-e39e4d54-cf28-41f3-99eb-0e53cf82d9be,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39667,DS-41660bf4-b5b7-4b75-9656-e64806834d3c,DISK]) is bad. 
2024-11-13T00:29:31,615 WARN [Thread-903 {}] hdfs.DataStreamer(1850): Abandoning BP-155329190-172.17.0.2-1731457758228:blk_1073741839_1019 2024-11-13T00:29:31,616 WARN [Thread-903 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39667,DS-41660bf4-b5b7-4b75-9656-e64806834d3c,DISK] 2024-11-13T00:29:31,635 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:29:31,635 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:29:31,636 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:29:31,636 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:29:31,636 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:29:31,636 INFO [regionserver/cb28580903fb:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,41233,1731457759323/cb28580903fb%2C41233%2C1731457759323.1731457759850 with entries=1, filesize=455 B; new WAL /user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,41233,1731457759323/cb28580903fb%2C41233%2C1731457759323.1731457771599 2024-11-13T00:29:31,640 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39667,DS-41660bf4-b5b7-4b75-9656-e64806834d3c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T00:29:31,641 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39667,DS-41660bf4-b5b7-4b75-9656-e64806834d3c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-13T00:29:31,642 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(47): Initialize RecoverLeaseFSUtils 2024-11-13T00:29:31,642 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(59): set recoverLeaseMethod to org.apache.hadoop.fs.LeaseRecoverable.recoverLease() 2024-11-13T00:29:31,643 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,41233,1731457759323/cb28580903fb%2C41233%2C1731457759323.1731457759850 2024-11-13T00:29:31,646 WARN [IPC Server handler 2 on default port 34283 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,41233,1731457759323/cb28580903fb%2C41233%2C1731457759323.1731457759850 has not been closed. Lease recovery is in progress. RecoveryId = 1021 for block blk_1073741833_1009 2024-11-13T00:29:31,651 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,41233,1731457759323/cb28580903fb%2C41233%2C1731457759323.1731457759850 after 5ms 2024-11-13T00:29:31,657 DEBUG [regionserver/cb28580903fb:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40735:40735),(127.0.0.1/127.0.0.1:36359:36359)] 2024-11-13T00:29:31,657 DEBUG [regionserver/cb28580903fb:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,41233,1731457759323/cb28580903fb%2C41233%2C1731457759323.1731457759850 is not closed yet, will try archiving it next time 2024-11-13T00:29:32,678 INFO [regionserver/cb28580903fb:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39667,DS-41660bf4-b5b7-4b75-9656-e64806834d3c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T00:29:33,453 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39667,DS-41660bf4-b5b7-4b75-9656-e64806834d3c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-13T00:29:33,657 INFO [regionserver/cb28580903fb:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39667,DS-41660bf4-b5b7-4b75-9656-e64806834d3c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T00:29:33,664 INFO [Time-limited test {}] wal.TestLogRolling(261): log.getCurrentFileName(): hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,41233,1731457759323/cb28580903fb%2C41233%2C1731457759323.1731457771599 2024-11-13T00:29:33,665 WARN [ResponseProcessor for block BP-155329190-172.17.0.2-1731457758228:blk_1073741840_1020 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-155329190-172.17.0.2-1731457758228:blk_1073741840_1020 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T00:29:33,666 WARN [DataStreamer for file /user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,41233,1731457759323/cb28580903fb%2C41233%2C1731457759323.1731457771599 block BP-155329190-172.17.0.2-1731457758228:blk_1073741840_1020 {}] hdfs.DataStreamer(1731): Error Recovery for BP-155329190-172.17.0.2-1731457758228:blk_1073741840_1020 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33729,DS-ce41437c-0062-4f42-ab04-40d754994e31,DISK], DatanodeInfoWithStorage[127.0.0.1:40983,DS-e39e4d54-cf28-41f3-99eb-0e53cf82d9be,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33729,DS-ce41437c-0062-4f42-ab04-40d754994e31,DISK]) is bad. 2024-11-13T00:29:33,667 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_202239816_22 at /127.0.0.1:40324 [Receiving block BP-155329190-172.17.0.2-1731457758228:blk_1073741840_1020] {}] datanode.DataXceiver(331): 127.0.0.1:33729:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:40324 dst: /127.0.0.1:33729 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] 
at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T00:29:33,667 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_202239816_22 at /127.0.0.1:41728 [Receiving block BP-155329190-172.17.0.2-1731457758228:blk_1073741840_1020] {}] datanode.DataXceiver(331): 127.0.0.1:40983:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41728 dst: /127.0.0.1:40983 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-13T00:29:33,668 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2ab7c419{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-13T00:29:33,669 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@35f8237e{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-13T00:29:33,669 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-13T00:29:33,669 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2378f0ec{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-13T00:29:33,669 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@48400c5f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bed78662-d1c4-0e5c-044c-1c6cedef83ec/hadoop.log.dir/,STOPPED} 2024-11-13T00:29:33,672 WARN [BP-155329190-172.17.0.2-1731457758228 heartbeating to localhost/127.0.0.1:34283 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-13T00:29:33,672 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-13T00:29:33,672 WARN [BP-155329190-172.17.0.2-1731457758228 heartbeating to localhost/127.0.0.1:34283 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-155329190-172.17.0.2-1731457758228 (Datanode Uuid d265e119-ea52-4573-b71a-d3e9d15f102d) service to localhost/127.0.0.1:34283 2024-11-13T00:29:33,672 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-13T00:29:33,673 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bed78662-d1c4-0e5c-044c-1c6cedef83ec/cluster_57ab5bb0-53ef-f515-2c82-6c13ee5c6aca/data/data5/current/BP-155329190-172.17.0.2-1731457758228 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-13T00:29:33,674 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bed78662-d1c4-0e5c-044c-1c6cedef83ec/cluster_57ab5bb0-53ef-f515-2c82-6c13ee5c6aca/data/data6/current/BP-155329190-172.17.0.2-1731457758228 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-13T00:29:33,674 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-13T00:29:34,678 INFO [regionserver/cb28580903fb:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39667,DS-41660bf4-b5b7-4b75-9656-e64806834d3c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T00:29:35,453 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39667,DS-41660bf4-b5b7-4b75-9656-e64806834d3c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T00:29:35,652 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,41233,1731457759323/cb28580903fb%2C41233%2C1731457759323.1731457759850 after 4009ms 2024-11-13T00:29:35,658 INFO [regionserver/cb28580903fb:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39667,DS-41660bf4-b5b7-4b75-9656-e64806834d3c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T00:29:35,658 WARN [regionserver/cb28580903fb:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:40983,DS-e39e4d54-cf28-41f3-99eb-0e53cf82d9be,DISK]] 2024-11-13T00:29:35,658 DEBUG [regionserver/cb28580903fb:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog cb28580903fb%2C41233%2C1731457759323:(num 1731457771599) roll requested 2024-11-13T00:29:35,659 INFO [regionserver/cb28580903fb:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor cb28580903fb%2C41233%2C1731457759323.1731457775659 2024-11-13T00:29:35,663 WARN [Thread-912 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741841_1023 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:33729 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T00:29:35,662 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_202239816_22 at /127.0.0.1:41748 [Receiving block BP-155329190-172.17.0.2-1731457758228:blk_1073741841_1023] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bed78662-d1c4-0e5c-044c-1c6cedef83ec/cluster_57ab5bb0-53ef-f515-2c82-6c13ee5c6aca/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bed78662-d1c4-0e5c-044c-1c6cedef83ec/cluster_57ab5bb0-53ef-f515-2c82-6c13ee5c6aca/data/data8]'}, localName='127.0.0.1:40983', datanodeUuid='4a04d4bc-f330-4e79-b94b-74f0b35167bf', xmitsInProgress=0}:Exception transferring block BP-155329190-172.17.0.2-1731457758228:blk_1073741841_1023 to mirror 127.0.0.1:33729 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T00:29:35,663 WARN [Thread-912 {}] hdfs.DataStreamer(1731): Error Recovery for BP-155329190-172.17.0.2-1731457758228:blk_1073741841_1023 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40983,DS-e39e4d54-cf28-41f3-99eb-0e53cf82d9be,DISK], DatanodeInfoWithStorage[127.0.0.1:33729,DS-ce41437c-0062-4f42-ab04-40d754994e31,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:33729,DS-ce41437c-0062-4f42-ab04-40d754994e31,DISK]) is bad. 2024-11-13T00:29:35,663 WARN [Thread-912 {}] hdfs.DataStreamer(1850): Abandoning BP-155329190-172.17.0.2-1731457758228:blk_1073741841_1023 2024-11-13T00:29:35,663 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_202239816_22 at /127.0.0.1:41748 [Receiving block BP-155329190-172.17.0.2-1731457758228:blk_1073741841_1023] {}] datanode.BlockReceiver(316): Block 1073741841 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-13T00:29:35,663 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_202239816_22 at /127.0.0.1:41748 [Receiving block BP-155329190-172.17.0.2-1731457758228:blk_1073741841_1023] {}] datanode.DataXceiver(331): 127.0.0.1:40983:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41748 dst: /127.0.0.1:40983 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] 
at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T00:29:35,663 WARN [Thread-912 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33729,DS-ce41437c-0062-4f42-ab04-40d754994e31,DISK] 2024-11-13T00:29:35,668 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:29:35,668 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:29:35,668 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:29:35,668 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:29:35,669 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:29:35,669 INFO [regionserver/cb28580903fb:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,41233,1731457759323/cb28580903fb%2C41233%2C1731457759323.1731457771599 with entries=2, filesize=2.37 KB; new WAL /user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,41233,1731457759323/cb28580903fb%2C41233%2C1731457759323.1731457775659 2024-11-13T00:29:35,671 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40983 is added to blk_1073741840_1022 (size=2431) 2024-11-13T00:29:35,673 DEBUG [regionserver/cb28580903fb:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34369:34369),(127.0.0.1/127.0.0.1:36359:36359)] 2024-11-13T00:29:35,673 DEBUG [regionserver/cb28580903fb:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,41233,1731457759323/cb28580903fb%2C41233%2C1731457759323.1731457759850 is not closed yet, will try archiving it next time 2024-11-13T00:29:35,673 DEBUG [regionserver/cb28580903fb:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,41233,1731457759323/cb28580903fb%2C41233%2C1731457759323.1731457771599 is not closed yet, will try archiving it next time 2024-11-13T00:29:35,679 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-11-13T00:29:36,072 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,41233,1731457759323/cb28580903fb%2C41233%2C1731457759323.1731457759850 is not closed yet, will try archiving it next time 2024-11-13T00:29:36,679 INFO [regionserver/cb28580903fb:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes 
[DatanodeInfoWithStorage[127.0.0.1:39667,DS-41660bf4-b5b7-4b75-9656-e64806834d3c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T00:29:37,235 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@155a9bf4[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:40983, datanodeUuid=4a04d4bc-f330-4e79-b94b-74f0b35167bf, infoPort=36359, infoSecurePort=0, ipcPort=39275, storageInfo=lv=-57;cid=testClusterID;nsid=1847167993;c=1731457758228):Failed to transfer BP-155329190-172.17.0.2-1731457758228:blk_1073741840_1022 to 127.0.0.1:39667 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T00:29:37,453 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39667,DS-41660bf4-b5b7-4b75-9656-e64806834d3c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T00:29:37,673 INFO [regionserver/cb28580903fb:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39667,DS-41660bf4-b5b7-4b75-9656-e64806834d3c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T00:29:37,683 WARN [ResponseProcessor for block BP-155329190-172.17.0.2-1731457758228:blk_1073741842_1024 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-155329190-172.17.0.2-1731457758228:blk_1073741842_1024 java.io.IOException: Bad response ERROR for BP-155329190-172.17.0.2-1731457758228:blk_1073741842_1024 from datanode DatanodeInfoWithStorage[127.0.0.1:40983,DS-e39e4d54-cf28-41f3-99eb-0e53cf82d9be,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T00:29:37,684 WARN [DataStreamer for file /user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,41233,1731457759323/cb28580903fb%2C41233%2C1731457759323.1731457775659 block BP-155329190-172.17.0.2-1731457758228:blk_1073741842_1024 {}] hdfs.DataStreamer(1731): Error Recovery for BP-155329190-172.17.0.2-1731457758228:blk_1073741842_1024 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35479,DS-e751e6f0-c69c-4ed5-91de-2138aa6a5d68,DISK], DatanodeInfoWithStorage[127.0.0.1:40983,DS-e39e4d54-cf28-41f3-99eb-0e53cf82d9be,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:40983,DS-e39e4d54-cf28-41f3-99eb-0e53cf82d9be,DISK]) is bad. 2024-11-13T00:29:37,683 WARN [PacketResponder: BP-155329190-172.17.0.2-1731457758228:blk_1073741842_1024, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:40983] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-13T00:29:37,684 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_202239816_22 at /127.0.0.1:41364 [Receiving block BP-155329190-172.17.0.2-1731457758228:blk_1073741842_1024] {}] datanode.DataXceiver(331): 127.0.0.1:35479:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41364 dst: /127.0.0.1:35479 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T00:29:37,684 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_202239816_22 at /127.0.0.1:41760 [Receiving block BP-155329190-172.17.0.2-1731457758228:blk_1073741842_1024] {}] datanode.DataXceiver(331): 127.0.0.1:40983:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41760 dst: /127.0.0.1:40983 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T00:29:37,685 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@13a31f2a{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-13T00:29:37,686 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@37073a3f{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-13T00:29:37,686 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-13T00:29:37,686 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6ee7700a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-13T00:29:37,686 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3a3ddcf2{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bed78662-d1c4-0e5c-044c-1c6cedef83ec/hadoop.log.dir/,STOPPED} 2024-11-13T00:29:37,688 WARN [BP-155329190-172.17.0.2-1731457758228 heartbeating to localhost/127.0.0.1:34283 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-13T00:29:37,688 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-13T00:29:37,688 WARN [BP-155329190-172.17.0.2-1731457758228 heartbeating to localhost/127.0.0.1:34283 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-155329190-172.17.0.2-1731457758228 (Datanode Uuid 4a04d4bc-f330-4e79-b94b-74f0b35167bf) service to localhost/127.0.0.1:34283 2024-11-13T00:29:37,688 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-13T00:29:37,688 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bed78662-d1c4-0e5c-044c-1c6cedef83ec/cluster_57ab5bb0-53ef-f515-2c82-6c13ee5c6aca/data/data7/current/BP-155329190-172.17.0.2-1731457758228 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-13T00:29:37,688 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bed78662-d1c4-0e5c-044c-1c6cedef83ec/cluster_57ab5bb0-53ef-f515-2c82-6c13ee5c6aca/data/data8/current/BP-155329190-172.17.0.2-1731457758228 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-13T00:29:37,689 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-13T00:29:37,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41233 {}] regionserver.HRegion(8855): Flush requested on dc52a27dfbcfebe0e13c82666f5a1780 2024-11-13T00:29:37,698 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing dc52a27dfbcfebe0e13c82666f5a1780 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-13T00:29:37,716 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/data/default/TestLogRolling-testLogRollOnDatanodeDeath/dc52a27dfbcfebe0e13c82666f5a1780/.tmp/info/fb255967502f496c86a4cf073e29de33 is 1080, key is row0002/info:/1731457773676/Put/seqid=0 2024-11-13T00:29:37,718 WARN [Thread-924 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741843_1026 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-13T00:29:37,718 WARN [Thread-924 {}] hdfs.DataStreamer(1731): Error Recovery for BP-155329190-172.17.0.2-1731457758228:blk_1073741843_1026 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35189,DS-0a2d533a-4bec-41d8-9594-717d0345b537,DISK], DatanodeInfoWithStorage[127.0.0.1:33729,DS-ce41437c-0062-4f42-ab04-40d754994e31,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:35189,DS-0a2d533a-4bec-41d8-9594-717d0345b537,DISK]) is bad. 2024-11-13T00:29:37,718 WARN [Thread-924 {}] hdfs.DataStreamer(1850): Abandoning BP-155329190-172.17.0.2-1731457758228:blk_1073741843_1026 2024-11-13T00:29:37,719 WARN [Thread-924 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35189,DS-0a2d533a-4bec-41d8-9594-717d0345b537,DISK] 2024-11-13T00:29:37,720 WARN [Thread-924 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741844_1027 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T00:29:37,720 WARN [Thread-924 {}] hdfs.DataStreamer(1731): Error Recovery for BP-155329190-172.17.0.2-1731457758228:blk_1073741844_1027 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39667,DS-41660bf4-b5b7-4b75-9656-e64806834d3c,DISK], DatanodeInfoWithStorage[127.0.0.1:35479,DS-e751e6f0-c69c-4ed5-91de-2138aa6a5d68,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39667,DS-41660bf4-b5b7-4b75-9656-e64806834d3c,DISK]) is bad. 2024-11-13T00:29:37,721 WARN [Thread-924 {}] hdfs.DataStreamer(1850): Abandoning BP-155329190-172.17.0.2-1731457758228:blk_1073741844_1027 2024-11-13T00:29:37,721 WARN [Thread-924 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39667,DS-41660bf4-b5b7-4b75-9656-e64806834d3c,DISK] 2024-11-13T00:29:37,723 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_202239816_22 at /127.0.0.1:41378 [Receiving block BP-155329190-172.17.0.2-1731457758228:blk_1073741845_1028] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bed78662-d1c4-0e5c-044c-1c6cedef83ec/cluster_57ab5bb0-53ef-f515-2c82-6c13ee5c6aca/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bed78662-d1c4-0e5c-044c-1c6cedef83ec/cluster_57ab5bb0-53ef-f515-2c82-6c13ee5c6aca/data/data10]'}, localName='127.0.0.1:35479', datanodeUuid='71bcd43b-d601-405d-8eb0-235245acf278', xmitsInProgress=0}:Exception transferring block BP-155329190-172.17.0.2-1731457758228:blk_1073741845_1028 to mirror 127.0.0.1:33729 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] 
at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T00:29:37,723 WARN [Thread-924 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741845_1028 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:33729 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T00:29:37,724 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_202239816_22 at /127.0.0.1:41378 [Receiving block BP-155329190-172.17.0.2-1731457758228:blk_1073741845_1028] {}] datanode.BlockReceiver(316): Block 1073741845 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-13T00:29:37,724 WARN [Thread-924 {}] hdfs.DataStreamer(1731): Error Recovery for BP-155329190-172.17.0.2-1731457758228:blk_1073741845_1028 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35479,DS-e751e6f0-c69c-4ed5-91de-2138aa6a5d68,DISK], DatanodeInfoWithStorage[127.0.0.1:33729,DS-ce41437c-0062-4f42-ab04-40d754994e31,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:33729,DS-ce41437c-0062-4f42-ab04-40d754994e31,DISK]) is bad. 2024-11-13T00:29:37,724 WARN [Thread-924 {}] hdfs.DataStreamer(1850): Abandoning BP-155329190-172.17.0.2-1731457758228:blk_1073741845_1028 2024-11-13T00:29:37,724 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_202239816_22 at /127.0.0.1:41378 [Receiving block BP-155329190-172.17.0.2-1731457758228:blk_1073741845_1028] {}] datanode.DataXceiver(331): 127.0.0.1:35479:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41378 dst: /127.0.0.1:35479 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T00:29:37,725 WARN [Thread-924 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33729,DS-ce41437c-0062-4f42-ab04-40d754994e31,DISK] 2024-11-13T00:29:37,727 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_202239816_22 at /127.0.0.1:41386 [Receiving block BP-155329190-172.17.0.2-1731457758228:blk_1073741846_1029] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bed78662-d1c4-0e5c-044c-1c6cedef83ec/cluster_57ab5bb0-53ef-f515-2c82-6c13ee5c6aca/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bed78662-d1c4-0e5c-044c-1c6cedef83ec/cluster_57ab5bb0-53ef-f515-2c82-6c13ee5c6aca/data/data10]'}, localName='127.0.0.1:35479', datanodeUuid='71bcd43b-d601-405d-8eb0-235245acf278', xmitsInProgress=0}:Exception transferring block BP-155329190-172.17.0.2-1731457758228:blk_1073741846_1029 to mirror 127.0.0.1:40983 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T00:29:37,727 WARN [Thread-924 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741846_1029 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:40983 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-13T00:29:37,727 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_202239816_22 at /127.0.0.1:41386 [Receiving block BP-155329190-172.17.0.2-1731457758228:blk_1073741846_1029] {}] datanode.BlockReceiver(316): Block 1073741846 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-13T00:29:37,727 WARN [Thread-924 {}] hdfs.DataStreamer(1731): Error Recovery for BP-155329190-172.17.0.2-1731457758228:blk_1073741846_1029 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35479,DS-e751e6f0-c69c-4ed5-91de-2138aa6a5d68,DISK], DatanodeInfoWithStorage[127.0.0.1:40983,DS-e39e4d54-cf28-41f3-99eb-0e53cf82d9be,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:40983,DS-e39e4d54-cf28-41f3-99eb-0e53cf82d9be,DISK]) is bad. 2024-11-13T00:29:37,727 WARN [Thread-924 {}] hdfs.DataStreamer(1850): Abandoning BP-155329190-172.17.0.2-1731457758228:blk_1073741846_1029 2024-11-13T00:29:37,727 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_202239816_22 at /127.0.0.1:41386 [Receiving block BP-155329190-172.17.0.2-1731457758228:blk_1073741846_1029] {}] datanode.DataXceiver(331): 127.0.0.1:35479:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41386 dst: /127.0.0.1:35479 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-13T00:29:37,728 WARN [Thread-924 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40983,DS-e39e4d54-cf28-41f3-99eb-0e53cf82d9be,DISK] 2024-11-13T00:29:37,728 WARN [IPC Server handler 2 on default port 34283 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-13T00:29:37,728 WARN [IPC Server handler 2 on default port 34283 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-13T00:29:37,729 WARN [IPC Server handler 2 on default port 34283 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-13T00:29:37,731 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35479 is added to blk_1073741847_1030 (size=10347) 2024-11-13T00:29:38,132 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/data/default/TestLogRolling-testLogRollOnDatanodeDeath/dc52a27dfbcfebe0e13c82666f5a1780/.tmp/info/fb255967502f496c86a4cf073e29de33 2024-11-13T00:29:38,140 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/data/default/TestLogRolling-testLogRollOnDatanodeDeath/dc52a27dfbcfebe0e13c82666f5a1780/.tmp/info/fb255967502f496c86a4cf073e29de33 as hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/data/default/TestLogRolling-testLogRollOnDatanodeDeath/dc52a27dfbcfebe0e13c82666f5a1780/info/fb255967502f496c86a4cf073e29de33 2024-11-13T00:29:38,146 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/data/default/TestLogRolling-testLogRollOnDatanodeDeath/dc52a27dfbcfebe0e13c82666f5a1780/info/fb255967502f496c86a4cf073e29de33, entries=5, sequenceid=11, filesize=10.1 K 2024-11-13T00:29:38,148 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=9.45 KB/9681 for dc52a27dfbcfebe0e13c82666f5a1780 in 450ms, sequenceid=11, compaction requested=false 2024-11-13T00:29:38,148 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for dc52a27dfbcfebe0e13c82666f5a1780: 2024-11-13T00:29:38,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41233 {}] 
regionserver.HRegion(8855): Flush requested on dc52a27dfbcfebe0e13c82666f5a1780 2024-11-13T00:29:38,320 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing dc52a27dfbcfebe0e13c82666f5a1780 1/1 column families, dataSize=10.50 KB heapSize=11.50 KB 2024-11-13T00:29:38,325 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/data/default/TestLogRolling-testLogRollOnDatanodeDeath/dc52a27dfbcfebe0e13c82666f5a1780/.tmp/info/be7107dcf0074c4696513de34dd08e81 is 1080, key is row0007/info:/1731457777699/Put/seqid=0 2024-11-13T00:29:38,326 WARN [Thread-931 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741848_1031 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T00:29:38,327 WARN [Thread-931 {}] hdfs.DataStreamer(1731): Error Recovery for BP-155329190-172.17.0.2-1731457758228:blk_1073741848_1031 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39667,DS-41660bf4-b5b7-4b75-9656-e64806834d3c,DISK], DatanodeInfoWithStorage[127.0.0.1:40983,DS-e39e4d54-cf28-41f3-99eb-0e53cf82d9be,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39667,DS-41660bf4-b5b7-4b75-9656-e64806834d3c,DISK]) is bad. 2024-11-13T00:29:38,327 WARN [Thread-931 {}] hdfs.DataStreamer(1850): Abandoning BP-155329190-172.17.0.2-1731457758228:blk_1073741848_1031 2024-11-13T00:29:38,327 WARN [Thread-931 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39667,DS-41660bf4-b5b7-4b75-9656-e64806834d3c,DISK] 2024-11-13T00:29:38,329 WARN [Thread-931 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741849_1032 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:40983 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
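Two failure signatures alternate in the entries above. A java.net.ConnectException raised inside DataStreamer.createSocketForPipeline means the client itself could not open a connection to the first datanode of the pipeline. "Got error, status=ERROR, status message , ack with firstBadLink as <addr>" means the first datanode accepted the connection but could not reach the downstream mirror; it reports that mirror's address back in the pipeline ack, and the client then marks that node as bad (here 127.0.0.1:40983, matching the DataXceiver "Exception transferring block ... to mirror 127.0.0.1:40983" entry). A small classifier over log records, assuming only the message shapes seen in this log; class, method, and constant names are made up for the sketch.

import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class PipelineFailureKind {
  enum Kind { CLIENT_CONNECT_FAILURE, MIRROR_FAILURE, OTHER }

  private static final Pattern FIRST_BAD_LINK =
      Pattern.compile("ack with firstBadLink as (\\S+)");

  // record = the full text of one log entry, including its stack trace
  static Kind classify(String record) {
    if (FIRST_BAD_LINK.matcher(record).find()) {
      return Kind.MIRROR_FAILURE;          // downstream mirror reported bad by the first datanode
    }
    if (record.contains("java.net.ConnectException: Connection refused")
        && record.contains("createSocketForPipeline")) {
      return Kind.CLIENT_CONNECT_FAILURE;  // client could not reach the first datanode at all
    }
    return Kind.OTHER;
  }

  static String badNode(String record) {
    Matcher m = FIRST_BAD_LINK.matcher(record);
    return m.find() ? m.group(1) : null;   // e.g. "127.0.0.1:40983" in the entries above
  }

  public static void main(String[] args) {
    String sample = "Exception in createBlockOutputStream blk_1073741849_1032 "
        + "java.io.IOException: Got error, status=ERROR, status message , "
        + "ack with firstBadLink as 127.0.0.1:40983";
    System.out.println(classify(sample) + " -> " + badNode(sample));
  }
}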
2024-11-13T00:29:38,329 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_202239816_22 at /127.0.0.1:41412 [Receiving block BP-155329190-172.17.0.2-1731457758228:blk_1073741849_1032] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bed78662-d1c4-0e5c-044c-1c6cedef83ec/cluster_57ab5bb0-53ef-f515-2c82-6c13ee5c6aca/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bed78662-d1c4-0e5c-044c-1c6cedef83ec/cluster_57ab5bb0-53ef-f515-2c82-6c13ee5c6aca/data/data10]'}, localName='127.0.0.1:35479', datanodeUuid='71bcd43b-d601-405d-8eb0-235245acf278', xmitsInProgress=0}:Exception transferring block BP-155329190-172.17.0.2-1731457758228:blk_1073741849_1032 to mirror 127.0.0.1:40983 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T00:29:38,330 WARN [Thread-931 {}] hdfs.DataStreamer(1731): Error Recovery for BP-155329190-172.17.0.2-1731457758228:blk_1073741849_1032 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35479,DS-e751e6f0-c69c-4ed5-91de-2138aa6a5d68,DISK], DatanodeInfoWithStorage[127.0.0.1:40983,DS-e39e4d54-cf28-41f3-99eb-0e53cf82d9be,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:40983,DS-e39e4d54-cf28-41f3-99eb-0e53cf82d9be,DISK]) is bad. 2024-11-13T00:29:38,330 WARN [Thread-931 {}] hdfs.DataStreamer(1850): Abandoning BP-155329190-172.17.0.2-1731457758228:blk_1073741849_1032 2024-11-13T00:29:38,330 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_202239816_22 at /127.0.0.1:41412 [Receiving block BP-155329190-172.17.0.2-1731457758228:blk_1073741849_1032] {}] datanode.BlockReceiver(316): Block 1073741849 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-13T00:29:38,330 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_202239816_22 at /127.0.0.1:41412 [Receiving block BP-155329190-172.17.0.2-1731457758228:blk_1073741849_1032] {}] datanode.DataXceiver(331): 127.0.0.1:35479:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41412 dst: /127.0.0.1:35479 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T00:29:38,330 WARN [Thread-931 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40983,DS-e39e4d54-cf28-41f3-99eb-0e53cf82d9be,DISK] 2024-11-13T00:29:38,331 WARN [Thread-931 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741850_1033 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T00:29:38,332 WARN [Thread-931 {}] hdfs.DataStreamer(1731): Error Recovery for BP-155329190-172.17.0.2-1731457758228:blk_1073741850_1033 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35189,DS-0a2d533a-4bec-41d8-9594-717d0345b537,DISK], DatanodeInfoWithStorage[127.0.0.1:33729,DS-ce41437c-0062-4f42-ab04-40d754994e31,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:35189,DS-0a2d533a-4bec-41d8-9594-717d0345b537,DISK]) is bad. 2024-11-13T00:29:38,332 WARN [Thread-931 {}] hdfs.DataStreamer(1850): Abandoning BP-155329190-172.17.0.2-1731457758228:blk_1073741850_1033 2024-11-13T00:29:38,332 WARN [Thread-931 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35189,DS-0a2d533a-4bec-41d8-9594-717d0345b537,DISK] 2024-11-13T00:29:38,334 WARN [Thread-931 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741851_1034 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:33729 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-13T00:29:38,335 WARN [Thread-931 {}] hdfs.DataStreamer(1731): Error Recovery for BP-155329190-172.17.0.2-1731457758228:blk_1073741851_1034 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35479,DS-e751e6f0-c69c-4ed5-91de-2138aa6a5d68,DISK], DatanodeInfoWithStorage[127.0.0.1:33729,DS-ce41437c-0062-4f42-ab04-40d754994e31,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:33729,DS-ce41437c-0062-4f42-ab04-40d754994e31,DISK]) is bad. 2024-11-13T00:29:38,335 WARN [Thread-931 {}] hdfs.DataStreamer(1850): Abandoning BP-155329190-172.17.0.2-1731457758228:blk_1073741851_1034 2024-11-13T00:29:38,334 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_202239816_22 at /127.0.0.1:41416 [Receiving block BP-155329190-172.17.0.2-1731457758228:blk_1073741851_1034] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bed78662-d1c4-0e5c-044c-1c6cedef83ec/cluster_57ab5bb0-53ef-f515-2c82-6c13ee5c6aca/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bed78662-d1c4-0e5c-044c-1c6cedef83ec/cluster_57ab5bb0-53ef-f515-2c82-6c13ee5c6aca/data/data10]'}, localName='127.0.0.1:35479', datanodeUuid='71bcd43b-d601-405d-8eb0-235245acf278', xmitsInProgress=0}:Exception transferring block BP-155329190-172.17.0.2-1731457758228:blk_1073741851_1034 to mirror 127.0.0.1:33729 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T00:29:38,335 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_202239816_22 at /127.0.0.1:41416 [Receiving block BP-155329190-172.17.0.2-1731457758228:blk_1073741851_1034] {}] datanode.BlockReceiver(316): Block 1073741851 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-13T00:29:38,335 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_202239816_22 at /127.0.0.1:41416 [Receiving block BP-155329190-172.17.0.2-1731457758228:blk_1073741851_1034] {}] datanode.DataXceiver(331): 127.0.0.1:35479:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41416 dst: /127.0.0.1:35479 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T00:29:38,335 WARN [Thread-931 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33729,DS-ce41437c-0062-4f42-ab04-40d754994e31,DISK] 2024-11-13T00:29:38,336 WARN [IPC Server handler 2 on default port 34283 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-13T00:29:38,336 WARN [IPC Server handler 2 on default port 34283 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-13T00:29:38,336 WARN [IPC Server handler 2 on default port 34283 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-13T00:29:38,339 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35479 is added to blk_1073741852_1035 (size=12506) 2024-11-13T00:29:38,679 INFO [regionserver/cb28580903fb:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39667,DS-41660bf4-b5b7-4b75-9656-e64806834d3c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
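The BlockPlacementPolicyDefault warnings above ("Failed to place enough replicas, still in need of 1 to reach 2") themselves point at the next diagnostic step: enabling DEBUG on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology so the NameNode explains which candidate datanodes were rejected and why. Since this run already uses Log4j 2 with a properties configuration, one way to do that from test setup code is via the Log4j 2 Configurator; a small sketch, assuming log4j-core is on the classpath as it is here (the helper class name is illustrative).

import org.apache.logging.log4j.Level;
import org.apache.logging.log4j.core.config.Configurator;

public final class PlacementDebugLogging {
  private PlacementDebugLogging() {}

  // Raise the two loggers named in the warning to DEBUG so placement
  // decisions (and exclusions) are logged in detail.
  public static void enable() {
    Configurator.setLevel(
        "org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy", Level.DEBUG);
    Configurator.setLevel(
        "org.apache.hadoop.net.NetworkTopology", Level.DEBUG);
  }
}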
2024-11-13T00:29:38,740 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=10.50 KB at sequenceid=24 (bloomFilter=true), to=hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/data/default/TestLogRolling-testLogRollOnDatanodeDeath/dc52a27dfbcfebe0e13c82666f5a1780/.tmp/info/be7107dcf0074c4696513de34dd08e81 2024-11-13T00:29:38,749 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/data/default/TestLogRolling-testLogRollOnDatanodeDeath/dc52a27dfbcfebe0e13c82666f5a1780/.tmp/info/be7107dcf0074c4696513de34dd08e81 as hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/data/default/TestLogRolling-testLogRollOnDatanodeDeath/dc52a27dfbcfebe0e13c82666f5a1780/info/be7107dcf0074c4696513de34dd08e81 2024-11-13T00:29:38,756 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/data/default/TestLogRolling-testLogRollOnDatanodeDeath/dc52a27dfbcfebe0e13c82666f5a1780/info/be7107dcf0074c4696513de34dd08e81, entries=7, sequenceid=24, filesize=12.2 K 2024-11-13T00:29:38,757 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~10.50 KB/10756, heapSize ~11.48 KB/11760, currentSize=2.10 KB/2150 for dc52a27dfbcfebe0e13c82666f5a1780 in 437ms, sequenceid=24, compaction requested=false 2024-11-13T00:29:38,757 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for dc52a27dfbcfebe0e13c82666f5a1780: 2024-11-13T00:29:38,757 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=22.3 K, sizeToCheck=16.0 K 2024-11-13T00:29:38,757 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-13T00:29:38,757 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/data/default/TestLogRolling-testLogRollOnDatanodeDeath/dc52a27dfbcfebe0e13c82666f5a1780/info/be7107dcf0074c4696513de34dd08e81 because midkey is the same as first or last row 2024-11-13T00:29:39,454 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39667,DS-41660bf4-b5b7-4b75-9656-e64806834d3c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T00:29:39,673 INFO [regionserver/cb28580903fb:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39667,DS-41660bf4-b5b7-4b75-9656-e64806834d3c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T00:29:39,674 WARN [regionserver/cb28580903fb:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:35479,DS-e751e6f0-c69c-4ed5-91de-2138aa6a5d68,DISK]] 2024-11-13T00:29:39,674 DEBUG [regionserver/cb28580903fb:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog cb28580903fb%2C41233%2C1731457759323:(num 1731457775659) roll requested 2024-11-13T00:29:39,674 INFO [regionserver/cb28580903fb:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor cb28580903fb%2C41233%2C1731457759323.1731457779674 2024-11-13T00:29:39,677 WARN [Thread-937 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741853_1036 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T00:29:39,677 WARN [Thread-937 {}] hdfs.DataStreamer(1731): Error Recovery for BP-155329190-172.17.0.2-1731457758228:blk_1073741853_1036 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33729,DS-ce41437c-0062-4f42-ab04-40d754994e31,DISK], DatanodeInfoWithStorage[127.0.0.1:35479,DS-e751e6f0-c69c-4ed5-91de-2138aa6a5d68,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33729,DS-ce41437c-0062-4f42-ab04-40d754994e31,DISK]) is bad. 2024-11-13T00:29:39,677 WARN [Thread-937 {}] hdfs.DataStreamer(1850): Abandoning BP-155329190-172.17.0.2-1731457758228:blk_1073741853_1036 2024-11-13T00:29:39,678 WARN [Thread-937 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33729,DS-ce41437c-0062-4f42-ab04-40d754994e31,DISK] 2024-11-13T00:29:39,679 WARN [Thread-937 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741854_1037 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T00:29:39,680 WARN [Thread-937 {}] hdfs.DataStreamer(1731): Error Recovery for BP-155329190-172.17.0.2-1731457758228:blk_1073741854_1037 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39667,DS-41660bf4-b5b7-4b75-9656-e64806834d3c,DISK], DatanodeInfoWithStorage[127.0.0.1:35479,DS-e751e6f0-c69c-4ed5-91de-2138aa6a5d68,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39667,DS-41660bf4-b5b7-4b75-9656-e64806834d3c,DISK]) is bad. 2024-11-13T00:29:39,680 WARN [Thread-937 {}] hdfs.DataStreamer(1850): Abandoning BP-155329190-172.17.0.2-1731457758228:blk_1073741854_1037 2024-11-13T00:29:39,680 WARN [Thread-937 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39667,DS-41660bf4-b5b7-4b75-9656-e64806834d3c,DISK] 2024-11-13T00:29:39,682 WARN [Thread-937 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741855_1038 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T00:29:39,682 WARN [Thread-937 {}] hdfs.DataStreamer(1731): Error Recovery for BP-155329190-172.17.0.2-1731457758228:blk_1073741855_1038 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40983,DS-e39e4d54-cf28-41f3-99eb-0e53cf82d9be,DISK], DatanodeInfoWithStorage[127.0.0.1:35189,DS-0a2d533a-4bec-41d8-9594-717d0345b537,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40983,DS-e39e4d54-cf28-41f3-99eb-0e53cf82d9be,DISK]) is bad. 2024-11-13T00:29:39,682 WARN [Thread-937 {}] hdfs.DataStreamer(1850): Abandoning BP-155329190-172.17.0.2-1731457758228:blk_1073741855_1038 2024-11-13T00:29:39,682 WARN [Thread-937 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40983,DS-e39e4d54-cf28-41f3-99eb-0e53cf82d9be,DISK] 2024-11-13T00:29:39,684 WARN [Thread-937 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741856_1039 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T00:29:39,684 WARN [Thread-937 {}] hdfs.DataStreamer(1731): Error Recovery for BP-155329190-172.17.0.2-1731457758228:blk_1073741856_1039 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35189,DS-0a2d533a-4bec-41d8-9594-717d0345b537,DISK], DatanodeInfoWithStorage[127.0.0.1:35479,DS-e751e6f0-c69c-4ed5-91de-2138aa6a5d68,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:35189,DS-0a2d533a-4bec-41d8-9594-717d0345b537,DISK]) is bad. 2024-11-13T00:29:39,684 WARN [Thread-937 {}] hdfs.DataStreamer(1850): Abandoning BP-155329190-172.17.0.2-1731457758228:blk_1073741856_1039 2024-11-13T00:29:39,684 WARN [Thread-937 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35189,DS-0a2d533a-4bec-41d8-9594-717d0345b537,DISK] 2024-11-13T00:29:39,685 WARN [IPC Server handler 4 on default port 34283 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-13T00:29:39,685 WARN [IPC Server handler 4 on default port 34283 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-13T00:29:39,685 WARN [IPC Server handler 4 on default port 34283 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-13T00:29:39,689 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:29:39,689 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:29:39,689 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:29:39,689 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:29:39,689 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:29:39,689 INFO [regionserver/cb28580903fb:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,41233,1731457759323/cb28580903fb%2C41233%2C1731457759323.1731457775659 with entries=25, filesize=25.38 KB; new WAL 
/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,41233,1731457759323/cb28580903fb%2C41233%2C1731457759323.1731457779674 2024-11-13T00:29:39,690 DEBUG [regionserver/cb28580903fb:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34369:34369)] 2024-11-13T00:29:39,690 DEBUG [regionserver/cb28580903fb:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,41233,1731457759323/cb28580903fb%2C41233%2C1731457759323.1731457759850 is not closed yet, will try archiving it next time 2024-11-13T00:29:39,690 DEBUG [regionserver/cb28580903fb:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,41233,1731457759323/cb28580903fb%2C41233%2C1731457759323.1731457775659 is not closed yet, will try archiving it next time 2024-11-13T00:29:39,691 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,41233,1731457759323/cb28580903fb%2C41233%2C1731457759323.1731457771599 to hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/oldWALs/cb28580903fb%2C41233%2C1731457759323.1731457771599 2024-11-13T00:29:39,691 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35479 is added to blk_1073741842_1025 (size=25992) 2024-11-13T00:29:39,695 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,41233,1731457759323/cb28580903fb%2C41233%2C1731457759323.1731457759850 is not closed yet, will try archiving it next time 2024-11-13T00:29:39,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41233 {}] regionserver.HRegion(8855): Flush requested on dc52a27dfbcfebe0e13c82666f5a1780 2024-11-13T00:29:39,741 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing dc52a27dfbcfebe0e13c82666f5a1780 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-13T00:29:39,751 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/data/default/TestLogRolling-testLogRollOnDatanodeDeath/dc52a27dfbcfebe0e13c82666f5a1780/.tmp/info/33c2781e4d064cbd98855f5e8824dd64 is 1079, key is tmprow/info:/1731457779740/Put/seqid=0 2024-11-13T00:29:39,754 WARN [Thread-941 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741858_1041 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T00:29:39,754 WARN [Thread-941 {}] hdfs.DataStreamer(1731): Error Recovery for BP-155329190-172.17.0.2-1731457758228:blk_1073741858_1041 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35189,DS-0a2d533a-4bec-41d8-9594-717d0345b537,DISK], DatanodeInfoWithStorage[127.0.0.1:39667,DS-41660bf4-b5b7-4b75-9656-e64806834d3c,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:35189,DS-0a2d533a-4bec-41d8-9594-717d0345b537,DISK]) is bad. 2024-11-13T00:29:39,754 WARN [Thread-941 {}] hdfs.DataStreamer(1850): Abandoning BP-155329190-172.17.0.2-1731457758228:blk_1073741858_1041 2024-11-13T00:29:39,755 WARN [Thread-941 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35189,DS-0a2d533a-4bec-41d8-9594-717d0345b537,DISK] 2024-11-13T00:29:39,756 WARN [Thread-941 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741859_1042 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T00:29:39,756 WARN [Thread-941 {}] hdfs.DataStreamer(1731): Error Recovery for BP-155329190-172.17.0.2-1731457758228:blk_1073741859_1042 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40983,DS-e39e4d54-cf28-41f3-99eb-0e53cf82d9be,DISK], DatanodeInfoWithStorage[127.0.0.1:33729,DS-ce41437c-0062-4f42-ab04-40d754994e31,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40983,DS-e39e4d54-cf28-41f3-99eb-0e53cf82d9be,DISK]) is bad. 2024-11-13T00:29:39,756 WARN [Thread-941 {}] hdfs.DataStreamer(1850): Abandoning BP-155329190-172.17.0.2-1731457758228:blk_1073741859_1042 2024-11-13T00:29:39,757 WARN [Thread-941 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40983,DS-e39e4d54-cf28-41f3-99eb-0e53cf82d9be,DISK] 2024-11-13T00:29:39,758 WARN [Thread-941 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741860_1043 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T00:29:39,758 WARN [Thread-941 {}] hdfs.DataStreamer(1731): Error Recovery for BP-155329190-172.17.0.2-1731457758228:blk_1073741860_1043 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39667,DS-41660bf4-b5b7-4b75-9656-e64806834d3c,DISK], DatanodeInfoWithStorage[127.0.0.1:35479,DS-e751e6f0-c69c-4ed5-91de-2138aa6a5d68,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39667,DS-41660bf4-b5b7-4b75-9656-e64806834d3c,DISK]) is bad. 2024-11-13T00:29:39,758 WARN [Thread-941 {}] hdfs.DataStreamer(1850): Abandoning BP-155329190-172.17.0.2-1731457758228:blk_1073741860_1043 2024-11-13T00:29:39,759 WARN [Thread-941 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39667,DS-41660bf4-b5b7-4b75-9656-e64806834d3c,DISK] 2024-11-13T00:29:39,766 WARN [Thread-941 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741861_1044 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:33729 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T00:29:39,766 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_202239816_22 at /127.0.0.1:41446 [Receiving block BP-155329190-172.17.0.2-1731457758228:blk_1073741861_1044] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bed78662-d1c4-0e5c-044c-1c6cedef83ec/cluster_57ab5bb0-53ef-f515-2c82-6c13ee5c6aca/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bed78662-d1c4-0e5c-044c-1c6cedef83ec/cluster_57ab5bb0-53ef-f515-2c82-6c13ee5c6aca/data/data10]'}, localName='127.0.0.1:35479', datanodeUuid='71bcd43b-d601-405d-8eb0-235245acf278', xmitsInProgress=0}:Exception transferring block BP-155329190-172.17.0.2-1731457758228:blk_1073741861_1044 to mirror 127.0.0.1:33729 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T00:29:39,766 WARN [Thread-941 {}] hdfs.DataStreamer(1731): Error Recovery for BP-155329190-172.17.0.2-1731457758228:blk_1073741861_1044 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35479,DS-e751e6f0-c69c-4ed5-91de-2138aa6a5d68,DISK], DatanodeInfoWithStorage[127.0.0.1:33729,DS-ce41437c-0062-4f42-ab04-40d754994e31,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:33729,DS-ce41437c-0062-4f42-ab04-40d754994e31,DISK]) is bad. 2024-11-13T00:29:39,766 WARN [Thread-941 {}] hdfs.DataStreamer(1850): Abandoning BP-155329190-172.17.0.2-1731457758228:blk_1073741861_1044 2024-11-13T00:29:39,766 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_202239816_22 at /127.0.0.1:41446 [Receiving block BP-155329190-172.17.0.2-1731457758228:blk_1073741861_1044] {}] datanode.BlockReceiver(316): Block 1073741861 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-13T00:29:39,766 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_202239816_22 at /127.0.0.1:41446 [Receiving block BP-155329190-172.17.0.2-1731457758228:blk_1073741861_1044] {}] datanode.DataXceiver(331): 127.0.0.1:35479:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41446 dst: /127.0.0.1:35479 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
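Even with four pipeline attempts abandoned above (blk_1073741858 through blk_1073741861), the flush of dc52a27dfbcfebe0e13c82666f5a1780 still goes through shortly afterwards on the one surviving datanode, 127.0.0.1:35479. In this test the flush is driven by MemStoreFlusher, but the same operation can be requested explicitly through the HBase Admin API, which is handy when reproducing this kind of datanode-death scenario by hand. A minimal sketch: the table name is taken from this test, while the class name and the assumption that client configuration is available via hbase-site.xml are illustrative.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class ExplicitFlushSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create(); // picks up hbase-site.xml / test config
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      // Ask the region server to flush the table used in this test; the
      // resulting HFile is written through the same HDFS pipeline that the
      // log shows recovering onto a single replica.
      admin.flush(TableName.valueOf("TestLogRolling-testLogRollOnDatanodeDeath"));
    }
  }
}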
2024-11-13T00:29:39,767 WARN [Thread-941 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33729,DS-ce41437c-0062-4f42-ab04-40d754994e31,DISK] 2024-11-13T00:29:39,767 WARN [IPC Server handler 1 on default port 34283 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-13T00:29:39,767 WARN [IPC Server handler 1 on default port 34283 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-13T00:29:39,767 WARN [IPC Server handler 1 on default port 34283 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-13T00:29:39,772 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35479 is added to blk_1073741862_1045 (size=6027) 2024-11-13T00:29:40,173 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=34 (bloomFilter=true), to=hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/data/default/TestLogRolling-testLogRollOnDatanodeDeath/dc52a27dfbcfebe0e13c82666f5a1780/.tmp/info/33c2781e4d064cbd98855f5e8824dd64 2024-11-13T00:29:40,182 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/data/default/TestLogRolling-testLogRollOnDatanodeDeath/dc52a27dfbcfebe0e13c82666f5a1780/.tmp/info/33c2781e4d064cbd98855f5e8824dd64 as hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/data/default/TestLogRolling-testLogRollOnDatanodeDeath/dc52a27dfbcfebe0e13c82666f5a1780/info/33c2781e4d064cbd98855f5e8824dd64 2024-11-13T00:29:40,190 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/data/default/TestLogRolling-testLogRollOnDatanodeDeath/dc52a27dfbcfebe0e13c82666f5a1780/info/33c2781e4d064cbd98855f5e8824dd64, entries=1, sequenceid=34, filesize=5.9 K 2024-11-13T00:29:40,191 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=2.10 KB/2150 for dc52a27dfbcfebe0e13c82666f5a1780 in 450ms, sequenceid=34, compaction requested=true 2024-11-13T00:29:40,192 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for dc52a27dfbcfebe0e13c82666f5a1780: 2024-11-13T00:29:40,192 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split 
because region size is big enough sumSize=28.2 K, sizeToCheck=16.0 K 2024-11-13T00:29:40,192 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-13T00:29:40,192 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/data/default/TestLogRolling-testLogRollOnDatanodeDeath/dc52a27dfbcfebe0e13c82666f5a1780/info/be7107dcf0074c4696513de34dd08e81 because midkey is the same as first or last row 2024-11-13T00:29:40,192 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store dc52a27dfbcfebe0e13c82666f5a1780:info, priority=-2147483648, current under compaction store size is 1 2024-11-13T00:29:40,192 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-13T00:29:40,193 DEBUG [RS:0;cb28580903fb:41233-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-13T00:29:40,194 DEBUG [RS:0;cb28580903fb:41233-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 28880 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-13T00:29:40,194 DEBUG [RS:0;cb28580903fb:41233-shortCompactions-0 {}] regionserver.HStore(1541): dc52a27dfbcfebe0e13c82666f5a1780/info is initiating minor compaction (all files) 2024-11-13T00:29:40,194 INFO [RS:0;cb28580903fb:41233-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of dc52a27dfbcfebe0e13c82666f5a1780/info in TestLogRolling-testLogRollOnDatanodeDeath,,1731457760725.dc52a27dfbcfebe0e13c82666f5a1780. 
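The "selected 3 files of size 28880" figure above is consistent with the three HFiles flushed earlier in this log: 10347 + 12506 + 6027 bytes = 28880 bytes, i.e. roughly 28.2 K, which matches both the compactor's totalSize below and the sumSize that ConstantSizeRegionSplitPolicy compares against its 16.0 K sizeToCheck. A trivial check of that arithmetic; the file sizes are copied from the addStoredBlock entries for the three flushed files, and the class name is illustrative.

public class CompactionSizeCheck {
  public static void main(String[] args) {
    // HFile sizes (bytes) from the addStoredBlock entries in this log.
    long hfile1 = 10347; // fb255967502f496c86a4cf073e29de33 (~10.1 K)
    long hfile2 = 12506; // be7107dcf0074c4696513de34dd08e81 (~12.2 K)
    long hfile3 = 6027;  // 33c2781e4d064cbd98855f5e8824dd64 (~5.9 K)

    long total = hfile1 + hfile2 + hfile3;   // 28880 bytes
    double totalKiB = total / 1024.0;        // ~28.2 K
    long sizeToCheck = 16 * 1024;            // 16.0 K split threshold from the log

    System.out.printf("total=%d bytes (%.1f K), exceeds %dK split check: %b%n",
        total, totalKiB, sizeToCheck / 1024, total > sizeToCheck);
  }
}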
2024-11-13T00:29:40,195 INFO [RS:0;cb28580903fb:41233-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/data/default/TestLogRolling-testLogRollOnDatanodeDeath/dc52a27dfbcfebe0e13c82666f5a1780/info/fb255967502f496c86a4cf073e29de33, hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/data/default/TestLogRolling-testLogRollOnDatanodeDeath/dc52a27dfbcfebe0e13c82666f5a1780/info/be7107dcf0074c4696513de34dd08e81, hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/data/default/TestLogRolling-testLogRollOnDatanodeDeath/dc52a27dfbcfebe0e13c82666f5a1780/info/33c2781e4d064cbd98855f5e8824dd64] into tmpdir=hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/data/default/TestLogRolling-testLogRollOnDatanodeDeath/dc52a27dfbcfebe0e13c82666f5a1780/.tmp, totalSize=28.2 K 2024-11-13T00:29:40,195 DEBUG [RS:0;cb28580903fb:41233-shortCompactions-0 {}] compactions.Compactor(225): Compacting fb255967502f496c86a4cf073e29de33, keycount=5, bloomtype=ROW, size=10.1 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1731457773676 2024-11-13T00:29:40,196 DEBUG [RS:0;cb28580903fb:41233-shortCompactions-0 {}] compactions.Compactor(225): Compacting be7107dcf0074c4696513de34dd08e81, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=24, earliestPutTs=1731457777699 2024-11-13T00:29:40,196 DEBUG [RS:0;cb28580903fb:41233-shortCompactions-0 {}] compactions.Compactor(225): Compacting 33c2781e4d064cbd98855f5e8824dd64, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=34, earliestPutTs=1731457779740 2024-11-13T00:29:40,224 INFO [RS:0;cb28580903fb:41233-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): dc52a27dfbcfebe0e13c82666f5a1780#info#compaction#21 average throughput is 4.10 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-13T00:29:40,225 DEBUG [RS:0;cb28580903fb:41233-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/data/default/TestLogRolling-testLogRollOnDatanodeDeath/dc52a27dfbcfebe0e13c82666f5a1780/.tmp/info/520eb4e18cb84176a4a6d232618f3798 is 1080, key is row0002/info:/1731457773676/Put/seqid=0 2024-11-13T00:29:40,229 WARN [Thread-948 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741863_1046 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-13T00:29:40,230 WARN [Thread-948 {}] hdfs.DataStreamer(1731): Error Recovery for BP-155329190-172.17.0.2-1731457758228:blk_1073741863_1046 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35189,DS-0a2d533a-4bec-41d8-9594-717d0345b537,DISK], DatanodeInfoWithStorage[127.0.0.1:40983,DS-e39e4d54-cf28-41f3-99eb-0e53cf82d9be,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:35189,DS-0a2d533a-4bec-41d8-9594-717d0345b537,DISK]) is bad. 2024-11-13T00:29:40,230 WARN [Thread-948 {}] hdfs.DataStreamer(1850): Abandoning BP-155329190-172.17.0.2-1731457758228:blk_1073741863_1046 2024-11-13T00:29:40,237 WARN [Thread-948 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35189,DS-0a2d533a-4bec-41d8-9594-717d0345b537,DISK] 2024-11-13T00:29:40,239 WARN [Thread-948 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741864_1047 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T00:29:40,239 WARN [Thread-948 {}] hdfs.DataStreamer(1731): Error Recovery for BP-155329190-172.17.0.2-1731457758228:blk_1073741864_1047 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33729,DS-ce41437c-0062-4f42-ab04-40d754994e31,DISK], DatanodeInfoWithStorage[127.0.0.1:39667,DS-41660bf4-b5b7-4b75-9656-e64806834d3c,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33729,DS-ce41437c-0062-4f42-ab04-40d754994e31,DISK]) is bad. 2024-11-13T00:29:40,239 WARN [Thread-948 {}] hdfs.DataStreamer(1850): Abandoning BP-155329190-172.17.0.2-1731457758228:blk_1073741864_1047 2024-11-13T00:29:40,240 WARN [Thread-948 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33729,DS-ce41437c-0062-4f42-ab04-40d754994e31,DISK] 2024-11-13T00:29:40,243 WARN [Thread-948 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741865_1048 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T00:29:40,243 WARN [Thread-948 {}] hdfs.DataStreamer(1731): Error Recovery for BP-155329190-172.17.0.2-1731457758228:blk_1073741865_1048 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39667,DS-41660bf4-b5b7-4b75-9656-e64806834d3c,DISK], DatanodeInfoWithStorage[127.0.0.1:35479,DS-e751e6f0-c69c-4ed5-91de-2138aa6a5d68,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39667,DS-41660bf4-b5b7-4b75-9656-e64806834d3c,DISK]) is bad. 2024-11-13T00:29:40,243 WARN [Thread-948 {}] hdfs.DataStreamer(1850): Abandoning BP-155329190-172.17.0.2-1731457758228:blk_1073741865_1048 2024-11-13T00:29:40,244 WARN [Thread-948 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39667,DS-41660bf4-b5b7-4b75-9656-e64806834d3c,DISK] 2024-11-13T00:29:40,250 WARN [Thread-948 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741866_1049 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:40983 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T00:29:40,250 WARN [Thread-948 {}] hdfs.DataStreamer(1731): Error Recovery for BP-155329190-172.17.0.2-1731457758228:blk_1073741866_1049 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35479,DS-e751e6f0-c69c-4ed5-91de-2138aa6a5d68,DISK], DatanodeInfoWithStorage[127.0.0.1:40983,DS-e39e4d54-cf28-41f3-99eb-0e53cf82d9be,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:40983,DS-e39e4d54-cf28-41f3-99eb-0e53cf82d9be,DISK]) is bad. 2024-11-13T00:29:40,250 WARN [Thread-948 {}] hdfs.DataStreamer(1850): Abandoning BP-155329190-172.17.0.2-1731457758228:blk_1073741866_1049 2024-11-13T00:29:40,250 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_202239816_22 at /127.0.0.1:41480 [Receiving block BP-155329190-172.17.0.2-1731457758228:blk_1073741866_1049] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bed78662-d1c4-0e5c-044c-1c6cedef83ec/cluster_57ab5bb0-53ef-f515-2c82-6c13ee5c6aca/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bed78662-d1c4-0e5c-044c-1c6cedef83ec/cluster_57ab5bb0-53ef-f515-2c82-6c13ee5c6aca/data/data10]'}, localName='127.0.0.1:35479', datanodeUuid='71bcd43b-d601-405d-8eb0-235245acf278', xmitsInProgress=0}:Exception transferring block BP-155329190-172.17.0.2-1731457758228:blk_1073741866_1049 to mirror 127.0.0.1:40983 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T00:29:40,250 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_202239816_22 at /127.0.0.1:41480 [Receiving block BP-155329190-172.17.0.2-1731457758228:blk_1073741866_1049] {}] datanode.BlockReceiver(316): Block 1073741866 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-13T00:29:40,251 WARN [Thread-948 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40983,DS-e39e4d54-cf28-41f3-99eb-0e53cf82d9be,DISK] 2024-11-13T00:29:40,251 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_202239816_22 at /127.0.0.1:41480 [Receiving block BP-155329190-172.17.0.2-1731457758228:blk_1073741866_1049] {}] datanode.DataXceiver(331): 127.0.0.1:35479:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41480 dst: /127.0.0.1:35479 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
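The repeated DataStreamer warnings above follow one pattern: a createBlockOutputStream attempt fails, either with a local ConnectException or with an ERROR ack naming a firstBadLink, the client abandons the block, adds the offending datanode to an exclusion list, and asks for a fresh pipeline. The sketch below imitates that exclude-and-retry loop with hypothetical interfaces (BlockAllocator, Connector); it is not the real DataStreamer, but it shows why the log eventually reaches "All datanodes [...] are bad. Aborting..." once the exclusion list covers every remaining candidate.

import java.io.IOException;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

/**
 * Sketch of the "abandon block, exclude datanode, retry" pattern visible in the
 * DataStreamer warnings above. All names here are hypothetical stand-ins.
 */
public class PipelineRetrySketch {

    /** Pretend namenode: hands out pipelines that avoid excluded nodes. */
    interface BlockAllocator {
        List<String> allocatePipeline(Set<String> excludedNodes) throws IOException;
    }

    /** Pretend datanode connector: throws if the first node is unreachable. */
    interface Connector {
        void connect(List<String> pipeline) throws IOException;
    }

    static List<String> setUpPipeline(BlockAllocator allocator, Connector connector,
                                      int maxAttempts) throws IOException {
        Set<String> excluded = new HashSet<>();
        for (int attempt = 1; attempt <= maxAttempts; attempt++) {
            List<String> pipeline = allocator.allocatePipeline(excluded);
            if (pipeline.isEmpty()) {
                // Mirrors "All datanodes [...] are bad. Aborting..."
                throw new IOException("All datanodes are bad. Aborting...");
            }
            try {
                connector.connect(pipeline);
                return pipeline;                       // pipeline established
            } catch (IOException e) {
                // Mirrors "Abandoning blk_..." / "Excluding datanode ..."
                String bad = pipeline.get(0);
                excluded.add(bad);
                System.out.println("Excluding datanode " + bad + " after: " + e.getMessage());
            }
        }
        throw new IOException("Unable to build a pipeline after " + maxAttempts + " attempts");
    }

    public static void main(String[] args) throws IOException {
        List<String> live = new ArrayList<>(List.of("dn1:35189", "dn2:33729", "dn3:35479"));
        // Only dn3 accepts connections in this toy run.
        Connector connector = p -> {
            if (!p.get(0).startsWith("dn3")) throw new IOException("Connection refused");
        };
        BlockAllocator allocator = excluded -> {
            List<String> pipeline = new ArrayList<>(live);
            pipeline.removeAll(excluded);
            return pipeline;
        };
        System.out.println("pipeline: " + setUpPipeline(allocator, connector, 5));
    }
}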
2024-11-13T00:29:40,252 WARN [IPC Server handler 3 on default port 34283 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-13T00:29:40,252 WARN [IPC Server handler 3 on default port 34283 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-13T00:29:40,252 WARN [IPC Server handler 3 on default port 34283 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-13T00:29:40,259 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35479 is added to blk_1073741867_1050 (size=17994) 2024-11-13T00:29:40,273 DEBUG [RS:0;cb28580903fb:41233-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/data/default/TestLogRolling-testLogRollOnDatanodeDeath/dc52a27dfbcfebe0e13c82666f5a1780/.tmp/info/520eb4e18cb84176a4a6d232618f3798 as hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/data/default/TestLogRolling-testLogRollOnDatanodeDeath/dc52a27dfbcfebe0e13c82666f5a1780/info/520eb4e18cb84176a4a6d232618f3798 2024-11-13T00:29:40,285 INFO [RS:0;cb28580903fb:41233-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in dc52a27dfbcfebe0e13c82666f5a1780/info of dc52a27dfbcfebe0e13c82666f5a1780 into 520eb4e18cb84176a4a6d232618f3798(size=17.6 K), total size for store is 17.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
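The IPC-handler warnings in this entry come from the namenode side of the same outage: with most datanodes unreachable it can find only one target for a block that wants two replicas, hence "still in need of 1 to reach 2". The toy chooser below shows the arithmetic behind that message under a deliberately naive policy (pick any live node not already chosen); the real BlockPlacementPolicyDefault is rack-aware and much more involved.

import java.util.ArrayList;
import java.util.List;
import java.util.Set;

/**
 * Toy replica chooser showing why "Failed to place enough replicas, still in
 * need of 1 to reach 2" appears when most datanodes are unreachable.
 * A minimal illustration only, not BlockPlacementPolicyDefault.
 */
public class ReplicaPlacementSketch {

    static List<String> chooseTargets(int replication, List<String> liveNodes,
                                      Set<String> excluded) {
        List<String> chosen = new ArrayList<>();
        for (String node : liveNodes) {
            if (chosen.size() == replication) break;
            if (!excluded.contains(node)) {
                chosen.add(node);
            }
        }
        if (chosen.size() < replication) {
            System.out.printf(
                "WARN Failed to place enough replicas, still in need of %d to reach %d%n",
                replication - chosen.size(), replication);
        }
        return chosen;
    }

    public static void main(String[] args) {
        // Only one datanode is still reachable, mirroring the situation in the log.
        List<String> live = List.of("127.0.0.1:35479");
        System.out.println(chooseTargets(2, live, Set.of()));   // -> one target plus a warning
    }
}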
2024-11-13T00:29:40,286 DEBUG [RS:0;cb28580903fb:41233-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for dc52a27dfbcfebe0e13c82666f5a1780: 2024-11-13T00:29:40,286 INFO [RS:0;cb28580903fb:41233-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRollOnDatanodeDeath,,1731457760725.dc52a27dfbcfebe0e13c82666f5a1780., storeName=dc52a27dfbcfebe0e13c82666f5a1780/info, priority=13, startTime=1731457780192; duration=0sec 2024-11-13T00:29:40,286 DEBUG [RS:0;cb28580903fb:41233-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-11-13T00:29:40,286 DEBUG [RS:0;cb28580903fb:41233-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-13T00:29:40,286 DEBUG [RS:0;cb28580903fb:41233-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/data/default/TestLogRolling-testLogRollOnDatanodeDeath/dc52a27dfbcfebe0e13c82666f5a1780/info/520eb4e18cb84176a4a6d232618f3798 because midkey is the same as first or last row 2024-11-13T00:29:40,287 DEBUG [RS:0;cb28580903fb:41233-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-11-13T00:29:40,287 DEBUG [RS:0;cb28580903fb:41233-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-13T00:29:40,287 DEBUG [RS:0;cb28580903fb:41233-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/data/default/TestLogRolling-testLogRollOnDatanodeDeath/dc52a27dfbcfebe0e13c82666f5a1780/info/520eb4e18cb84176a4a6d232618f3798 because midkey is the same as first or last row 2024-11-13T00:29:40,287 DEBUG [RS:0;cb28580903fb:41233-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-11-13T00:29:40,287 DEBUG [RS:0;cb28580903fb:41233-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-13T00:29:40,287 DEBUG [RS:0;cb28580903fb:41233-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/data/default/TestLogRolling-testLogRollOnDatanodeDeath/dc52a27dfbcfebe0e13c82666f5a1780/info/520eb4e18cb84176a4a6d232618f3798 because midkey is the same as first or last row 2024-11-13T00:29:40,287 DEBUG [RS:0;cb28580903fb:41233-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-13T00:29:40,287 DEBUG [RS:0;cb28580903fb:41233-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: dc52a27dfbcfebe0e13c82666f5a1780:info 2024-11-13T00:29:40,401 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@76708b7c[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:35479, datanodeUuid=71bcd43b-d601-405d-8eb0-235245acf278, infoPort=34369, infoSecurePort=0, ipcPort=45923, storageInfo=lv=-57;cid=testClusterID;nsid=1847167993;c=1731457758228):Failed to transfer 
BP-155329190-172.17.0.2-1731457758228:blk_1073741847_1030 to 127.0.0.1:33729 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T00:29:40,401 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@78b76bc2[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:35479, datanodeUuid=71bcd43b-d601-405d-8eb0-235245acf278, infoPort=34369, infoSecurePort=0, ipcPort=45923, storageInfo=lv=-57;cid=testClusterID;nsid=1847167993;c=1731457758228):Failed to transfer BP-155329190-172.17.0.2-1731457758228:blk_1073741852_1035 to 127.0.0.1:35189 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T00:29:40,680 INFO [regionserver/cb28580903fb:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39667,DS-41660bf4-b5b7-4b75-9656-e64806834d3c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-13T00:29:41,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41233 {}] regionserver.HRegion(8855): Flush requested on dc52a27dfbcfebe0e13c82666f5a1780 2024-11-13T00:29:41,165 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing dc52a27dfbcfebe0e13c82666f5a1780 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-13T00:29:41,171 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/data/default/TestLogRolling-testLogRollOnDatanodeDeath/dc52a27dfbcfebe0e13c82666f5a1780/.tmp/info/8b32eb0b6b79481ebafb567520170af4 is 1079, key is tmprow/info:/1731457781163/Put/seqid=0 2024-11-13T00:29:41,173 WARN [Thread-957 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741868_1051 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T00:29:41,174 WARN [Thread-957 {}] hdfs.DataStreamer(1731): Error Recovery for BP-155329190-172.17.0.2-1731457758228:blk_1073741868_1051 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33729,DS-ce41437c-0062-4f42-ab04-40d754994e31,DISK], DatanodeInfoWithStorage[127.0.0.1:35479,DS-e751e6f0-c69c-4ed5-91de-2138aa6a5d68,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33729,DS-ce41437c-0062-4f42-ab04-40d754994e31,DISK]) is bad. 2024-11-13T00:29:41,174 WARN [Thread-957 {}] hdfs.DataStreamer(1850): Abandoning BP-155329190-172.17.0.2-1731457758228:blk_1073741868_1051 2024-11-13T00:29:41,174 WARN [Thread-957 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33729,DS-ce41437c-0062-4f42-ab04-40d754994e31,DISK] 2024-11-13T00:29:41,176 WARN [Thread-957 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741869_1052 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
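This entry starts another memstore flush: a flush is requested on the region, the single column family's 7.35 KB of edits is written to a new file under .tmp, and further down the file is committed into the info store and the flush is marked finished. The sketch below reproduces that snapshot, write-to-temp, commit sequence in miniature; the in-memory TreeMap and the file format are stand-ins, not HBase's MemStore or HFile.

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardCopyOption;
import java.util.Map;
import java.util.TreeMap;
import java.util.UUID;

/**
 * Toy version of the flush sequence in the log: take a snapshot of the memstore,
 * write the snapshot to a temporary file, commit (rename) it into the store
 * directory, then drop the snapshot. Not HBase code; a shape sketch only.
 */
public class MemstoreFlushSketch {

    private final TreeMap<String, String> active = new TreeMap<>();   // sorted edits
    private final Path storeDir;
    private final Path tmpDir;

    MemstoreFlushSketch(Path regionDir) throws IOException {
        this.storeDir = Files.createDirectories(regionDir.resolve("info"));
        this.tmpDir = Files.createDirectories(regionDir.resolve(".tmp"));
    }

    void put(String row, String value) {
        active.put(row, value);
    }

    /** "Flushing ... 1/1 column families" -> "Flushed ..." -> "Committing ..." -> "Added ..." */
    Path flush() throws IOException {
        TreeMap<String, String> snapshot = new TreeMap<>(active);      // freeze current edits
        active.clear();                                                // new writes go to a fresh memstore

        Path tmpFile = tmpDir.resolve(UUID.randomUUID().toString().replace("-", ""));
        StringBuilder sb = new StringBuilder();
        for (Map.Entry<String, String> e : snapshot.entrySet()) {
            sb.append(e.getKey()).append('=').append(e.getValue()).append('\n');
        }
        Files.writeString(tmpFile, sb);                                // write sorted snapshot

        Path committed = storeDir.resolve(tmpFile.getFileName());
        return Files.move(tmpFile, committed, StandardCopyOption.ATOMIC_MOVE);
    }

    public static void main(String[] args) throws IOException {
        MemstoreFlushSketch store = new MemstoreFlushSketch(Files.createTempDirectory("region"));
        store.put("tmprow", "value");                                  // like the single-entry flushes in the log
        System.out.println("flushed to " + store.flush());
    }
}

Writing to .tmp first and committing with a rename is what keeps readers from ever observing a half-written store file while a flush or compaction is in progress.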
2024-11-13T00:29:41,176 WARN [Thread-957 {}] hdfs.DataStreamer(1731): Error Recovery for BP-155329190-172.17.0.2-1731457758228:blk_1073741869_1052 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35189,DS-0a2d533a-4bec-41d8-9594-717d0345b537,DISK], DatanodeInfoWithStorage[127.0.0.1:35479,DS-e751e6f0-c69c-4ed5-91de-2138aa6a5d68,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:35189,DS-0a2d533a-4bec-41d8-9594-717d0345b537,DISK]) is bad. 2024-11-13T00:29:41,176 WARN [Thread-957 {}] hdfs.DataStreamer(1850): Abandoning BP-155329190-172.17.0.2-1731457758228:blk_1073741869_1052 2024-11-13T00:29:41,177 WARN [Thread-957 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35189,DS-0a2d533a-4bec-41d8-9594-717d0345b537,DISK] 2024-11-13T00:29:41,178 WARN [Thread-957 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741870_1053 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T00:29:41,179 WARN [Thread-957 {}] hdfs.DataStreamer(1731): Error Recovery for BP-155329190-172.17.0.2-1731457758228:blk_1073741870_1053 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39667,DS-41660bf4-b5b7-4b75-9656-e64806834d3c,DISK], DatanodeInfoWithStorage[127.0.0.1:35479,DS-e751e6f0-c69c-4ed5-91de-2138aa6a5d68,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39667,DS-41660bf4-b5b7-4b75-9656-e64806834d3c,DISK]) is bad. 2024-11-13T00:29:41,179 WARN [Thread-957 {}] hdfs.DataStreamer(1850): Abandoning BP-155329190-172.17.0.2-1731457758228:blk_1073741870_1053 2024-11-13T00:29:41,179 WARN [Thread-957 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39667,DS-41660bf4-b5b7-4b75-9656-e64806834d3c,DISK] 2024-11-13T00:29:41,181 WARN [Thread-957 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741871_1054 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T00:29:41,181 WARN [Thread-957 {}] hdfs.DataStreamer(1731): Error Recovery for BP-155329190-172.17.0.2-1731457758228:blk_1073741871_1054 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40983,DS-e39e4d54-cf28-41f3-99eb-0e53cf82d9be,DISK], DatanodeInfoWithStorage[127.0.0.1:35479,DS-e751e6f0-c69c-4ed5-91de-2138aa6a5d68,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40983,DS-e39e4d54-cf28-41f3-99eb-0e53cf82d9be,DISK]) is bad. 2024-11-13T00:29:41,181 WARN [Thread-957 {}] hdfs.DataStreamer(1850): Abandoning BP-155329190-172.17.0.2-1731457758228:blk_1073741871_1054 2024-11-13T00:29:41,182 WARN [Thread-957 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40983,DS-e39e4d54-cf28-41f3-99eb-0e53cf82d9be,DISK] 2024-11-13T00:29:41,183 WARN [IPC Server handler 2 on default port 34283 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-13T00:29:41,183 WARN [IPC Server handler 2 on default port 34283 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-13T00:29:41,183 WARN [IPC Server handler 2 on default port 34283 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-13T00:29:41,190 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35479 is added to blk_1073741872_1055 (size=6027) 2024-11-13T00:29:41,191 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=45 (bloomFilter=true), to=hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/data/default/TestLogRolling-testLogRollOnDatanodeDeath/dc52a27dfbcfebe0e13c82666f5a1780/.tmp/info/8b32eb0b6b79481ebafb567520170af4 2024-11-13T00:29:41,200 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/data/default/TestLogRolling-testLogRollOnDatanodeDeath/dc52a27dfbcfebe0e13c82666f5a1780/.tmp/info/8b32eb0b6b79481ebafb567520170af4 as hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/data/default/TestLogRolling-testLogRollOnDatanodeDeath/dc52a27dfbcfebe0e13c82666f5a1780/info/8b32eb0b6b79481ebafb567520170af4 2024-11-13T00:29:41,207 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/data/default/TestLogRolling-testLogRollOnDatanodeDeath/dc52a27dfbcfebe0e13c82666f5a1780/info/8b32eb0b6b79481ebafb567520170af4, entries=1, sequenceid=45, filesize=5.9 K 2024-11-13T00:29:41,209 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=0 B/0 for dc52a27dfbcfebe0e13c82666f5a1780 in 44ms, sequenceid=45, compaction requested=false 2024-11-13T00:29:41,209 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for dc52a27dfbcfebe0e13c82666f5a1780: 2024-11-13T00:29:41,209 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=23.5 K, sizeToCheck=16.0 K 2024-11-13T00:29:41,209 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-13T00:29:41,209 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/data/default/TestLogRolling-testLogRollOnDatanodeDeath/dc52a27dfbcfebe0e13c82666f5a1780/info/520eb4e18cb84176a4a6d232618f3798 because midkey is the same as first or last row 2024-11-13T00:29:41,454 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39667,DS-41660bf4-b5b7-4b75-9656-e64806834d3c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T00:29:41,691 INFO [regionserver/cb28580903fb:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39667,DS-41660bf4-b5b7-4b75-9656-e64806834d3c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T00:29:41,691 WARN [regionserver/cb28580903fb:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. 
current pipeline: [DatanodeInfoWithStorage[127.0.0.1:35479,DS-e751e6f0-c69c-4ed5-91de-2138aa6a5d68,DISK]] 2024-11-13T00:29:41,691 DEBUG [regionserver/cb28580903fb:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog cb28580903fb%2C41233%2C1731457759323:(num 1731457779674) roll requested 2024-11-13T00:29:41,691 INFO [regionserver/cb28580903fb:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor cb28580903fb%2C41233%2C1731457759323.1731457781691 2024-11-13T00:29:41,694 WARN [Thread-961 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741873_1056 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T00:29:41,695 WARN [Thread-961 {}] hdfs.DataStreamer(1731): Error Recovery for BP-155329190-172.17.0.2-1731457758228:blk_1073741873_1056 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33729,DS-ce41437c-0062-4f42-ab04-40d754994e31,DISK], DatanodeInfoWithStorage[127.0.0.1:39667,DS-41660bf4-b5b7-4b75-9656-e64806834d3c,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33729,DS-ce41437c-0062-4f42-ab04-40d754994e31,DISK]) is bad. 2024-11-13T00:29:41,695 WARN [Thread-961 {}] hdfs.DataStreamer(1850): Abandoning BP-155329190-172.17.0.2-1731457758228:blk_1073741873_1056 2024-11-13T00:29:41,695 WARN [Thread-961 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33729,DS-ce41437c-0062-4f42-ab04-40d754994e31,DISK] 2024-11-13T00:29:41,696 WARN [Thread-961 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741874_1057 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
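The preceding entries show the log roller reacting to the degraded pipeline: FSHLog reports "Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL.", a roll is requested, and a new WAL named with a fresh epoch-millisecond suffix is opened. The sketch below shows that check-and-roll shape; the threshold and the naming convention simply mirror what the log prints, and the types are stand-ins rather than FSHLog or AbstractWALRoller.

import java.util.List;

/**
 * Sketch of the low-replication check that drives the WAL roll in the log:
 * if the current write pipeline has fewer replicas than the configured minimum,
 * ask for the WAL to be closed and a new one opened under a fresh timestamp.
 * Names are illustrative; this is not FSHLog/AbstractWALRoller.
 */
public class WalRollSketch {

    interface WalRoller {
        void requestRoll(String reason);
    }

    static void checkLowReplication(List<String> currentPipeline, int minReplicas,
                                    WalRoller roller) {
        int replicas = currentPipeline.size();
        if (replicas < minReplicas) {
            roller.requestRoll(String.format(
                "Found %d replicas but expecting no less than %d replicas. Requesting close of WAL.",
                replicas, minReplicas));
        }
    }

    /** New WAL files in the log are named "<prefix>.<epochMillis>". */
    static String newWalName(String prefix, long nowMillis) {
        return prefix + "." + nowMillis;
    }

    public static void main(String[] args) {
        List<String> pipeline = List.of("127.0.0.1:35479");     // only one surviving datanode
        checkLowReplication(pipeline, 2,
            reason -> System.out.println("roll requested: " + reason));
        System.out.println("next WAL: "
            + newWalName("cb28580903fb%2C41233%2C1731457759323", 1731457781691L));
    }
}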
2024-11-13T00:29:41,696 WARN [Thread-961 {}] hdfs.DataStreamer(1731): Error Recovery for BP-155329190-172.17.0.2-1731457758228:blk_1073741874_1057 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39667,DS-41660bf4-b5b7-4b75-9656-e64806834d3c,DISK], DatanodeInfoWithStorage[127.0.0.1:35479,DS-e751e6f0-c69c-4ed5-91de-2138aa6a5d68,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39667,DS-41660bf4-b5b7-4b75-9656-e64806834d3c,DISK]) is bad. 2024-11-13T00:29:41,696 WARN [Thread-961 {}] hdfs.DataStreamer(1850): Abandoning BP-155329190-172.17.0.2-1731457758228:blk_1073741874_1057 2024-11-13T00:29:41,697 WARN [Thread-961 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39667,DS-41660bf4-b5b7-4b75-9656-e64806834d3c,DISK] 2024-11-13T00:29:41,699 WARN [Thread-961 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741875_1058 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:40983 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T00:29:41,699 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_202239816_22 at /127.0.0.1:53398 [Receiving block BP-155329190-172.17.0.2-1731457758228:blk_1073741875_1058] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bed78662-d1c4-0e5c-044c-1c6cedef83ec/cluster_57ab5bb0-53ef-f515-2c82-6c13ee5c6aca/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bed78662-d1c4-0e5c-044c-1c6cedef83ec/cluster_57ab5bb0-53ef-f515-2c82-6c13ee5c6aca/data/data10]'}, localName='127.0.0.1:35479', datanodeUuid='71bcd43b-d601-405d-8eb0-235245acf278', xmitsInProgress=0}:Exception transferring block BP-155329190-172.17.0.2-1731457758228:blk_1073741875_1058 to mirror 127.0.0.1:40983 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-13T00:29:41,699 WARN [Thread-961 {}] hdfs.DataStreamer(1731): Error Recovery for BP-155329190-172.17.0.2-1731457758228:blk_1073741875_1058 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35479,DS-e751e6f0-c69c-4ed5-91de-2138aa6a5d68,DISK], DatanodeInfoWithStorage[127.0.0.1:40983,DS-e39e4d54-cf28-41f3-99eb-0e53cf82d9be,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:40983,DS-e39e4d54-cf28-41f3-99eb-0e53cf82d9be,DISK]) is bad. 2024-11-13T00:29:41,699 WARN [Thread-961 {}] hdfs.DataStreamer(1850): Abandoning BP-155329190-172.17.0.2-1731457758228:blk_1073741875_1058 2024-11-13T00:29:41,699 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_202239816_22 at /127.0.0.1:53398 [Receiving block BP-155329190-172.17.0.2-1731457758228:blk_1073741875_1058] {}] datanode.BlockReceiver(316): Block 1073741875 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-13T00:29:41,700 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_202239816_22 at /127.0.0.1:53398 [Receiving block BP-155329190-172.17.0.2-1731457758228:blk_1073741875_1058] {}] datanode.DataXceiver(331): 127.0.0.1:35479:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:53398 dst: /127.0.0.1:35479 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T00:29:41,700 WARN [Thread-961 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40983,DS-e39e4d54-cf28-41f3-99eb-0e53cf82d9be,DISK] 2024-11-13T00:29:41,701 WARN [Thread-961 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741876_1059 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-13T00:29:41,701 WARN [Thread-961 {}] hdfs.DataStreamer(1731): Error Recovery for BP-155329190-172.17.0.2-1731457758228:blk_1073741876_1059 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35189,DS-0a2d533a-4bec-41d8-9594-717d0345b537,DISK], DatanodeInfoWithStorage[127.0.0.1:35479,DS-e751e6f0-c69c-4ed5-91de-2138aa6a5d68,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:35189,DS-0a2d533a-4bec-41d8-9594-717d0345b537,DISK]) is bad. 2024-11-13T00:29:41,701 WARN [Thread-961 {}] hdfs.DataStreamer(1850): Abandoning BP-155329190-172.17.0.2-1731457758228:blk_1073741876_1059 2024-11-13T00:29:41,701 WARN [Thread-961 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35189,DS-0a2d533a-4bec-41d8-9594-717d0345b537,DISK] 2024-11-13T00:29:41,702 WARN [IPC Server handler 3 on default port 34283 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-13T00:29:41,702 WARN [IPC Server handler 3 on default port 34283 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-13T00:29:41,702 WARN [IPC Server handler 3 on default port 34283 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-13T00:29:41,705 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:29:41,705 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:29:41,705 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:29:41,705 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:29:41,705 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:29:41,705 INFO [regionserver/cb28580903fb:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,41233,1731457759323/cb28580903fb%2C41233%2C1731457759323.1731457779674 with entries=15, filesize=13.26 KB; new WAL /user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,41233,1731457759323/cb28580903fb%2C41233%2C1731457759323.1731457781691 2024-11-13T00:29:41,707 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35479 is added to blk_1073741857_1040 (size=13591) 2024-11-13T00:29:41,712 DEBUG [regionserver/cb28580903fb:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34369:34369)] 2024-11-13T00:29:41,712 DEBUG [regionserver/cb28580903fb:0.logRoller {}] wal.AbstractFSWAL(879): 
hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,41233,1731457759323/cb28580903fb%2C41233%2C1731457759323.1731457759850 is not closed yet, will try archiving it next time 2024-11-13T00:29:41,712 DEBUG [regionserver/cb28580903fb:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,41233,1731457759323/cb28580903fb%2C41233%2C1731457759323.1731457779674 is not closed yet, will try archiving it next time 2024-11-13T00:29:41,712 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,41233,1731457759323/cb28580903fb%2C41233%2C1731457759323.1731457775659 to hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/oldWALs/cb28580903fb%2C41233%2C1731457759323.1731457775659 2024-11-13T00:29:42,108 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,41233,1731457759323/cb28580903fb%2C41233%2C1731457759323.1731457759850 is not closed yet, will try archiving it next time 2024-11-13T00:29:42,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41233 {}] regionserver.HRegion(8855): Flush requested on dc52a27dfbcfebe0e13c82666f5a1780 2024-11-13T00:29:42,587 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing dc52a27dfbcfebe0e13c82666f5a1780 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-13T00:29:42,593 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/data/default/TestLogRolling-testLogRollOnDatanodeDeath/dc52a27dfbcfebe0e13c82666f5a1780/.tmp/info/a7e49eac632c4ac0b9dc24fae9c1fb65 is 1079, key is tmprow/info:/1731457782585/Put/seqid=0 2024-11-13T00:29:42,595 WARN [Thread-966 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741878_1061 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T00:29:42,595 WARN [Thread-966 {}] hdfs.DataStreamer(1731): Error Recovery for BP-155329190-172.17.0.2-1731457758228:blk_1073741878_1061 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39667,DS-41660bf4-b5b7-4b75-9656-e64806834d3c,DISK], DatanodeInfoWithStorage[127.0.0.1:35479,DS-e751e6f0-c69c-4ed5-91de-2138aa6a5d68,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39667,DS-41660bf4-b5b7-4b75-9656-e64806834d3c,DISK]) is bad. 
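After the roll, this entry shows the housekeeping for older WALs: files whose writers are "not closed yet" are skipped and retried on a later pass, while fully closed ones are moved into the oldWALs directory. The sketch below captures that skip-or-archive loop with java.nio; the isClosed predicate is a stand-in for whatever bookkeeping the real WAL keeps about outstanding writers.

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardCopyOption;
import java.util.List;
import java.util.function.Predicate;

/**
 * Sketch of the post-roll housekeeping visible in the log: each old WAL that is
 * already closed is moved into an "oldWALs" directory; any WAL whose writer has
 * not finished closing is skipped and retried on a later pass.
 */
public class WalArchiveSketch {

    static void archiveOldWals(List<Path> oldWals, Path oldWalDir,
                               Predicate<Path> isClosed) throws IOException {
        Files.createDirectories(oldWalDir);
        for (Path wal : oldWals) {
            if (!isClosed.test(wal)) {
                System.out.println(wal.getFileName() + " is not closed yet, will try archiving it next time");
                continue;
            }
            Path target = oldWalDir.resolve(wal.getFileName());
            Files.move(wal, target, StandardCopyOption.ATOMIC_MOVE);
            System.out.println("Archived " + wal.getFileName() + " to " + target);
        }
    }

    public static void main(String[] args) throws IOException {
        Path walDir = Files.createTempDirectory("WALs");
        Path oldWalDir = walDir.resolveSibling("oldWALs");
        Path closed = Files.createFile(walDir.resolve("wal.1731457775659"));
        Path open = Files.createFile(walDir.resolve("wal.1731457779674"));
        // Treat only the first file as fully closed in this toy run.
        archiveOldWals(List.of(closed, open), oldWalDir, p -> p.equals(closed));
    }
}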
2024-11-13T00:29:42,595 WARN [Thread-966 {}] hdfs.DataStreamer(1850): Abandoning BP-155329190-172.17.0.2-1731457758228:blk_1073741878_1061 2024-11-13T00:29:42,596 WARN [Thread-966 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39667,DS-41660bf4-b5b7-4b75-9656-e64806834d3c,DISK] 2024-11-13T00:29:42,601 WARN [Thread-966 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741879_1062 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:40983 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T00:29:42,601 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_202239816_22 at /127.0.0.1:53418 [Receiving block BP-155329190-172.17.0.2-1731457758228:blk_1073741879_1062] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bed78662-d1c4-0e5c-044c-1c6cedef83ec/cluster_57ab5bb0-53ef-f515-2c82-6c13ee5c6aca/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bed78662-d1c4-0e5c-044c-1c6cedef83ec/cluster_57ab5bb0-53ef-f515-2c82-6c13ee5c6aca/data/data10]'}, localName='127.0.0.1:35479', datanodeUuid='71bcd43b-d601-405d-8eb0-235245acf278', xmitsInProgress=0}:Exception transferring block BP-155329190-172.17.0.2-1731457758228:blk_1073741879_1062 to mirror 127.0.0.1:40983 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T00:29:42,602 WARN [Thread-966 {}] hdfs.DataStreamer(1731): Error Recovery for BP-155329190-172.17.0.2-1731457758228:blk_1073741879_1062 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35479,DS-e751e6f0-c69c-4ed5-91de-2138aa6a5d68,DISK], DatanodeInfoWithStorage[127.0.0.1:40983,DS-e39e4d54-cf28-41f3-99eb-0e53cf82d9be,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:40983,DS-e39e4d54-cf28-41f3-99eb-0e53cf82d9be,DISK]) is bad. 
2024-11-13T00:29:42,602 WARN [Thread-966 {}] hdfs.DataStreamer(1850): Abandoning BP-155329190-172.17.0.2-1731457758228:blk_1073741879_1062 2024-11-13T00:29:42,602 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_202239816_22 at /127.0.0.1:53418 [Receiving block BP-155329190-172.17.0.2-1731457758228:blk_1073741879_1062] {}] datanode.BlockReceiver(316): Block 1073741879 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-13T00:29:42,602 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_202239816_22 at /127.0.0.1:53418 [Receiving block BP-155329190-172.17.0.2-1731457758228:blk_1073741879_1062] {}] datanode.DataXceiver(331): 127.0.0.1:35479:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:53418 dst: /127.0.0.1:35479 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T00:29:42,602 WARN [Thread-966 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40983,DS-e39e4d54-cf28-41f3-99eb-0e53cf82d9be,DISK] 2024-11-13T00:29:42,603 WARN [Thread-966 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741880_1063 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T00:29:42,603 WARN [Thread-966 {}] hdfs.DataStreamer(1731): Error Recovery for BP-155329190-172.17.0.2-1731457758228:blk_1073741880_1063 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35189,DS-0a2d533a-4bec-41d8-9594-717d0345b537,DISK], DatanodeInfoWithStorage[127.0.0.1:35479,DS-e751e6f0-c69c-4ed5-91de-2138aa6a5d68,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:35189,DS-0a2d533a-4bec-41d8-9594-717d0345b537,DISK]) is bad. 
2024-11-13T00:29:42,604 WARN [Thread-966 {}] hdfs.DataStreamer(1850): Abandoning BP-155329190-172.17.0.2-1731457758228:blk_1073741880_1063 2024-11-13T00:29:42,604 WARN [Thread-966 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35189,DS-0a2d533a-4bec-41d8-9594-717d0345b537,DISK] 2024-11-13T00:29:42,605 WARN [Thread-966 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741881_1064 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T00:29:42,605 WARN [Thread-966 {}] hdfs.DataStreamer(1731): Error Recovery for BP-155329190-172.17.0.2-1731457758228:blk_1073741881_1064 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33729,DS-ce41437c-0062-4f42-ab04-40d754994e31,DISK], DatanodeInfoWithStorage[127.0.0.1:35479,DS-e751e6f0-c69c-4ed5-91de-2138aa6a5d68,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33729,DS-ce41437c-0062-4f42-ab04-40d754994e31,DISK]) is bad. 2024-11-13T00:29:42,605 WARN [Thread-966 {}] hdfs.DataStreamer(1850): Abandoning BP-155329190-172.17.0.2-1731457758228:blk_1073741881_1064 2024-11-13T00:29:42,606 WARN [Thread-966 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33729,DS-ce41437c-0062-4f42-ab04-40d754994e31,DISK] 2024-11-13T00:29:42,606 WARN [IPC Server handler 1 on default port 34283 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-13T00:29:42,606 WARN [IPC Server handler 1 on default port 34283 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-13T00:29:42,607 WARN [IPC Server handler 1 on default port 34283 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-13T00:29:42,610 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35479 is added to blk_1073741882_1065 (size=6027) 2024-11-13T00:29:42,680 INFO [regionserver/cb28580903fb:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39667,DS-41660bf4-b5b7-4b75-9656-e64806834d3c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T00:29:43,011 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=55 (bloomFilter=true), to=hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/data/default/TestLogRolling-testLogRollOnDatanodeDeath/dc52a27dfbcfebe0e13c82666f5a1780/.tmp/info/a7e49eac632c4ac0b9dc24fae9c1fb65 2024-11-13T00:29:43,018 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/data/default/TestLogRolling-testLogRollOnDatanodeDeath/dc52a27dfbcfebe0e13c82666f5a1780/.tmp/info/a7e49eac632c4ac0b9dc24fae9c1fb65 as hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/data/default/TestLogRolling-testLogRollOnDatanodeDeath/dc52a27dfbcfebe0e13c82666f5a1780/info/a7e49eac632c4ac0b9dc24fae9c1fb65 2024-11-13T00:29:43,025 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/data/default/TestLogRolling-testLogRollOnDatanodeDeath/dc52a27dfbcfebe0e13c82666f5a1780/info/a7e49eac632c4ac0b9dc24fae9c1fb65, entries=1, sequenceid=55, filesize=5.9 K 2024-11-13T00:29:43,026 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=2.10 KB/2150 for dc52a27dfbcfebe0e13c82666f5a1780 in 440ms, sequenceid=55, compaction requested=true 2024-11-13T00:29:43,026 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for dc52a27dfbcfebe0e13c82666f5a1780: 2024-11-13T00:29:43,027 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=29.3 K, sizeToCheck=16.0 K 2024-11-13T00:29:43,027 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-13T00:29:43,027 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/data/default/TestLogRolling-testLogRollOnDatanodeDeath/dc52a27dfbcfebe0e13c82666f5a1780/info/520eb4e18cb84176a4a6d232618f3798 because midkey is the same as first or last row 2024-11-13T00:29:43,027 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store dc52a27dfbcfebe0e13c82666f5a1780:info, priority=-2147483648, current under compaction store size is 1 2024-11-13T00:29:43,027 DEBUG 
[MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-13T00:29:43,027 DEBUG [RS:0;cb28580903fb:41233-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-13T00:29:43,028 DEBUG [RS:0;cb28580903fb:41233-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 30048 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-13T00:29:43,029 DEBUG [RS:0;cb28580903fb:41233-shortCompactions-0 {}] regionserver.HStore(1541): dc52a27dfbcfebe0e13c82666f5a1780/info is initiating minor compaction (all files) 2024-11-13T00:29:43,029 INFO [RS:0;cb28580903fb:41233-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of dc52a27dfbcfebe0e13c82666f5a1780/info in TestLogRolling-testLogRollOnDatanodeDeath,,1731457760725.dc52a27dfbcfebe0e13c82666f5a1780. 2024-11-13T00:29:43,029 INFO [RS:0;cb28580903fb:41233-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/data/default/TestLogRolling-testLogRollOnDatanodeDeath/dc52a27dfbcfebe0e13c82666f5a1780/info/520eb4e18cb84176a4a6d232618f3798, hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/data/default/TestLogRolling-testLogRollOnDatanodeDeath/dc52a27dfbcfebe0e13c82666f5a1780/info/8b32eb0b6b79481ebafb567520170af4, hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/data/default/TestLogRolling-testLogRollOnDatanodeDeath/dc52a27dfbcfebe0e13c82666f5a1780/info/a7e49eac632c4ac0b9dc24fae9c1fb65] into tmpdir=hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/data/default/TestLogRolling-testLogRollOnDatanodeDeath/dc52a27dfbcfebe0e13c82666f5a1780/.tmp, totalSize=29.3 K 2024-11-13T00:29:43,029 DEBUG [RS:0;cb28580903fb:41233-shortCompactions-0 {}] compactions.Compactor(225): Compacting 520eb4e18cb84176a4a6d232618f3798, keycount=12, bloomtype=ROW, size=17.6 K, encoding=NONE, compression=NONE, seqNum=34, earliestPutTs=1731457773676 2024-11-13T00:29:43,030 DEBUG [RS:0;cb28580903fb:41233-shortCompactions-0 {}] compactions.Compactor(225): Compacting 8b32eb0b6b79481ebafb567520170af4, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=45, earliestPutTs=1731457781163 2024-11-13T00:29:43,030 DEBUG [RS:0;cb28580903fb:41233-shortCompactions-0 {}] compactions.Compactor(225): Compacting a7e49eac632c4ac0b9dc24fae9c1fb65, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1731457782585 2024-11-13T00:29:43,049 INFO [RS:0;cb28580903fb:41233-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): dc52a27dfbcfebe0e13c82666f5a1780#info#compaction#24 average throughput is 4.10 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-13T00:29:43,050 DEBUG [RS:0;cb28580903fb:41233-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/data/default/TestLogRolling-testLogRollOnDatanodeDeath/dc52a27dfbcfebe0e13c82666f5a1780/.tmp/info/d492e4b2a05647eca0190de6b1212eda is 1080, key is row0002/info:/1731457773676/Put/seqid=0 2024-11-13T00:29:43,053 WARN [Thread-971 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741883_1066 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:40983 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T00:29:43,053 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_202239816_22 at /127.0.0.1:53426 [Receiving block BP-155329190-172.17.0.2-1731457758228:blk_1073741883_1066] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bed78662-d1c4-0e5c-044c-1c6cedef83ec/cluster_57ab5bb0-53ef-f515-2c82-6c13ee5c6aca/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bed78662-d1c4-0e5c-044c-1c6cedef83ec/cluster_57ab5bb0-53ef-f515-2c82-6c13ee5c6aca/data/data10]'}, localName='127.0.0.1:35479', datanodeUuid='71bcd43b-d601-405d-8eb0-235245acf278', xmitsInProgress=0}:Exception transferring block BP-155329190-172.17.0.2-1731457758228:blk_1073741883_1066 to mirror 127.0.0.1:40983 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-13T00:29:43,053 WARN [Thread-971 {}] hdfs.DataStreamer(1731): Error Recovery for BP-155329190-172.17.0.2-1731457758228:blk_1073741883_1066 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35479,DS-e751e6f0-c69c-4ed5-91de-2138aa6a5d68,DISK], DatanodeInfoWithStorage[127.0.0.1:40983,DS-e39e4d54-cf28-41f3-99eb-0e53cf82d9be,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:40983,DS-e39e4d54-cf28-41f3-99eb-0e53cf82d9be,DISK]) is bad. 2024-11-13T00:29:43,053 WARN [Thread-971 {}] hdfs.DataStreamer(1850): Abandoning BP-155329190-172.17.0.2-1731457758228:blk_1073741883_1066 2024-11-13T00:29:43,053 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_202239816_22 at /127.0.0.1:53426 [Receiving block BP-155329190-172.17.0.2-1731457758228:blk_1073741883_1066] {}] datanode.BlockReceiver(316): Block 1073741883 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-13T00:29:43,053 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_202239816_22 at /127.0.0.1:53426 [Receiving block BP-155329190-172.17.0.2-1731457758228:blk_1073741883_1066] {}] datanode.DataXceiver(331): 127.0.0.1:35479:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:53426 dst: /127.0.0.1:35479 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T00:29:43,054 WARN [Thread-971 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40983,DS-e39e4d54-cf28-41f3-99eb-0e53cf82d9be,DISK] 2024-11-13T00:29:43,055 WARN [Thread-971 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741884_1067 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-13T00:29:43,055 WARN [Thread-971 {}] hdfs.DataStreamer(1731): Error Recovery for BP-155329190-172.17.0.2-1731457758228:blk_1073741884_1067 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39667,DS-41660bf4-b5b7-4b75-9656-e64806834d3c,DISK], DatanodeInfoWithStorage[127.0.0.1:33729,DS-ce41437c-0062-4f42-ab04-40d754994e31,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39667,DS-41660bf4-b5b7-4b75-9656-e64806834d3c,DISK]) is bad. 2024-11-13T00:29:43,055 WARN [Thread-971 {}] hdfs.DataStreamer(1850): Abandoning BP-155329190-172.17.0.2-1731457758228:blk_1073741884_1067 2024-11-13T00:29:43,056 WARN [Thread-971 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39667,DS-41660bf4-b5b7-4b75-9656-e64806834d3c,DISK] 2024-11-13T00:29:43,057 WARN [Thread-971 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741885_1068 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T00:29:43,058 WARN [Thread-971 {}] hdfs.DataStreamer(1731): Error Recovery for BP-155329190-172.17.0.2-1731457758228:blk_1073741885_1068 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35189,DS-0a2d533a-4bec-41d8-9594-717d0345b537,DISK], DatanodeInfoWithStorage[127.0.0.1:33729,DS-ce41437c-0062-4f42-ab04-40d754994e31,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:35189,DS-0a2d533a-4bec-41d8-9594-717d0345b537,DISK]) is bad. 2024-11-13T00:29:43,058 WARN [Thread-971 {}] hdfs.DataStreamer(1850): Abandoning BP-155329190-172.17.0.2-1731457758228:blk_1073741885_1068 2024-11-13T00:29:43,058 WARN [Thread-971 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35189,DS-0a2d533a-4bec-41d8-9594-717d0345b537,DISK] 2024-11-13T00:29:43,066 WARN [Thread-971 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741886_1069 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:33729 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-13T00:29:43,066 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_202239816_22 at /127.0.0.1:53440 [Receiving block BP-155329190-172.17.0.2-1731457758228:blk_1073741886_1069] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bed78662-d1c4-0e5c-044c-1c6cedef83ec/cluster_57ab5bb0-53ef-f515-2c82-6c13ee5c6aca/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bed78662-d1c4-0e5c-044c-1c6cedef83ec/cluster_57ab5bb0-53ef-f515-2c82-6c13ee5c6aca/data/data10]'}, localName='127.0.0.1:35479', datanodeUuid='71bcd43b-d601-405d-8eb0-235245acf278', xmitsInProgress=0}:Exception transferring block BP-155329190-172.17.0.2-1731457758228:blk_1073741886_1069 to mirror 127.0.0.1:33729 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T00:29:43,066 WARN [Thread-971 {}] hdfs.DataStreamer(1731): Error Recovery for BP-155329190-172.17.0.2-1731457758228:blk_1073741886_1069 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35479,DS-e751e6f0-c69c-4ed5-91de-2138aa6a5d68,DISK], DatanodeInfoWithStorage[127.0.0.1:33729,DS-ce41437c-0062-4f42-ab04-40d754994e31,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:33729,DS-ce41437c-0062-4f42-ab04-40d754994e31,DISK]) is bad. 2024-11-13T00:29:43,066 WARN [Thread-971 {}] hdfs.DataStreamer(1850): Abandoning BP-155329190-172.17.0.2-1731457758228:blk_1073741886_1069 2024-11-13T00:29:43,066 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_202239816_22 at /127.0.0.1:53440 [Receiving block BP-155329190-172.17.0.2-1731457758228:blk_1073741886_1069] {}] datanode.BlockReceiver(316): Block 1073741886 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-13T00:29:43,066 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_202239816_22 at /127.0.0.1:53440 [Receiving block BP-155329190-172.17.0.2-1731457758228:blk_1073741886_1069] {}] datanode.DataXceiver(331): 127.0.0.1:35479:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:53440 dst: /127.0.0.1:35479 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T00:29:43,066 WARN [Thread-971 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33729,DS-ce41437c-0062-4f42-ab04-40d754994e31,DISK] 2024-11-13T00:29:43,067 WARN [IPC Server handler 3 on default port 34283 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-13T00:29:43,068 WARN [IPC Server handler 3 on default port 34283 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-13T00:29:43,068 WARN [IPC Server handler 3 on default port 34283 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-13T00:29:43,075 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35479 is added to blk_1073741887_1070 (size=18097) 2024-11-13T00:29:43,083 DEBUG [RS:0;cb28580903fb:41233-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/data/default/TestLogRolling-testLogRollOnDatanodeDeath/dc52a27dfbcfebe0e13c82666f5a1780/.tmp/info/d492e4b2a05647eca0190de6b1212eda as hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/data/default/TestLogRolling-testLogRollOnDatanodeDeath/dc52a27dfbcfebe0e13c82666f5a1780/info/d492e4b2a05647eca0190de6b1212eda 2024-11-13T00:29:43,094 INFO [RS:0;cb28580903fb:41233-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in dc52a27dfbcfebe0e13c82666f5a1780/info of dc52a27dfbcfebe0e13c82666f5a1780 into d492e4b2a05647eca0190de6b1212eda(size=17.7 K), total size for store is 17.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-13T00:29:43,094 DEBUG [RS:0;cb28580903fb:41233-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for dc52a27dfbcfebe0e13c82666f5a1780: 2024-11-13T00:29:43,094 INFO [RS:0;cb28580903fb:41233-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRollOnDatanodeDeath,,1731457760725.dc52a27dfbcfebe0e13c82666f5a1780., storeName=dc52a27dfbcfebe0e13c82666f5a1780/info, priority=13, startTime=1731457783027; duration=0sec 2024-11-13T00:29:43,094 DEBUG [RS:0;cb28580903fb:41233-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.7 K, sizeToCheck=16.0 K 2024-11-13T00:29:43,094 DEBUG [RS:0;cb28580903fb:41233-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-13T00:29:43,094 DEBUG [RS:0;cb28580903fb:41233-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/data/default/TestLogRolling-testLogRollOnDatanodeDeath/dc52a27dfbcfebe0e13c82666f5a1780/info/d492e4b2a05647eca0190de6b1212eda because midkey is the same as first or last row 2024-11-13T00:29:43,095 DEBUG [RS:0;cb28580903fb:41233-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.7 K, sizeToCheck=16.0 K 2024-11-13T00:29:43,095 DEBUG [RS:0;cb28580903fb:41233-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-13T00:29:43,095 DEBUG [RS:0;cb28580903fb:41233-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/data/default/TestLogRolling-testLogRollOnDatanodeDeath/dc52a27dfbcfebe0e13c82666f5a1780/info/d492e4b2a05647eca0190de6b1212eda because midkey is the same as first or last row 2024-11-13T00:29:43,095 DEBUG [RS:0;cb28580903fb:41233-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.7 K, sizeToCheck=16.0 K 2024-11-13T00:29:43,095 DEBUG [RS:0;cb28580903fb:41233-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-13T00:29:43,095 DEBUG [RS:0;cb28580903fb:41233-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/data/default/TestLogRolling-testLogRollOnDatanodeDeath/dc52a27dfbcfebe0e13c82666f5a1780/info/d492e4b2a05647eca0190de6b1212eda because midkey is the same as first or last row 2024-11-13T00:29:43,095 DEBUG [RS:0;cb28580903fb:41233-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-13T00:29:43,095 DEBUG [RS:0;cb28580903fb:41233-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: dc52a27dfbcfebe0e13c82666f5a1780:info 2024-11-13T00:29:43,400 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@76708b7c[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:35479, datanodeUuid=71bcd43b-d601-405d-8eb0-235245acf278, infoPort=34369, infoSecurePort=0, ipcPort=45923, storageInfo=lv=-57;cid=testClusterID;nsid=1847167993;c=1731457758228):Failed to transfer 
BP-155329190-172.17.0.2-1731457758228:blk_1073741862_1045 to 127.0.0.1:33729 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T00:29:43,400 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@78b76bc2[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:35479, datanodeUuid=71bcd43b-d601-405d-8eb0-235245acf278, infoPort=34369, infoSecurePort=0, ipcPort=45923, storageInfo=lv=-57;cid=testClusterID;nsid=1847167993;c=1731457758228):Failed to transfer BP-155329190-172.17.0.2-1731457758228:blk_1073741842_1025 to 127.0.0.1:39667 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T00:29:43,455 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39667,DS-41660bf4-b5b7-4b75-9656-e64806834d3c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T00:29:43,712 INFO [regionserver/cb28580903fb:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39667,DS-41660bf4-b5b7-4b75-9656-e64806834d3c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T00:29:43,713 WARN [regionserver/cb28580903fb:0.logRoller {}] wal.FSHLog(539): Too many consecutive RollWriter requests, it's a sign of the total number of live datanodes is lower than the tolerable replicas. 2024-11-13T00:29:43,812 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-13T00:29:43,815 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-13T00:29:43,816 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-13T00:29:43,816 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-13T00:29:43,816 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-13T00:29:43,816 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1716e69f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bed78662-d1c4-0e5c-044c-1c6cedef83ec/hadoop.log.dir/,AVAILABLE} 2024-11-13T00:29:43,817 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@30746b96{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-13T00:29:43,932 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4bb74c79{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bed78662-d1c4-0e5c-044c-1c6cedef83ec/java.io.tmpdir/jetty-localhost-40515-hadoop-hdfs-3_4_1-tests_jar-_-any-6996325388953240293/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-13T00:29:43,933 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@79e13ed6{HTTP/1.1, (http/1.1)}{localhost:40515} 2024-11-13T00:29:43,933 INFO [Time-limited test {}] server.Server(415): Started @129781ms 2024-11-13T00:29:43,934 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-13T00:29:44,043 WARN [Thread-991 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-13T00:29:44,054 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xcb514f6e02e5ce87 with lease ID 0x8693d6b9cb09435b: from storage DS-0a2d533a-4bec-41d8-9594-717d0345b537 node DatanodeRegistration(127.0.0.1:35783, datanodeUuid=15755521-cc20-4ae7-8d60-99b27f2bc8a1, infoPort=35589, infoSecurePort=0, ipcPort=37549, storageInfo=lv=-57;cid=testClusterID;nsid=1847167993;c=1731457758228), blocks: 6, hasStaleStorage: false, processing time: 4 msecs, invalidatedBlocks: 0 2024-11-13T00:29:44,054 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xcb514f6e02e5ce87 with lease ID 0x8693d6b9cb09435b: from storage DS-c0d089db-e3f0-432f-bc7e-76b3938bf782 node DatanodeRegistration(127.0.0.1:35783, datanodeUuid=15755521-cc20-4ae7-8d60-99b27f2bc8a1, infoPort=35589, infoSecurePort=0, ipcPort=37549, storageInfo=lv=-57;cid=testClusterID;nsid=1847167993;c=1731457758228), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-13T00:29:44,398 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@78b76bc2[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:35479, datanodeUuid=71bcd43b-d601-405d-8eb0-235245acf278, infoPort=34369, infoSecurePort=0, ipcPort=45923, storageInfo=lv=-57;cid=testClusterID;nsid=1847167993;c=1731457758228):Failed to transfer BP-155329190-172.17.0.2-1731457758228:blk_1073741867_1050 to 127.0.0.1:40983 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T00:29:44,398 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@76708b7c[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:35479, datanodeUuid=71bcd43b-d601-405d-8eb0-235245acf278, infoPort=34369, infoSecurePort=0, ipcPort=45923, storageInfo=lv=-57;cid=testClusterID;nsid=1847167993;c=1731457758228):Failed to transfer BP-155329190-172.17.0.2-1731457758228:blk_1073741872_1055 to 127.0.0.1:33729 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T00:29:44,680 INFO [regionserver/cb28580903fb:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39667,DS-41660bf4-b5b7-4b75-9656-e64806834d3c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T00:29:45,455 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39667,DS-41660bf4-b5b7-4b75-9656-e64806834d3c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T00:29:45,713 INFO [regionserver/cb28580903fb:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39667,DS-41660bf4-b5b7-4b75-9656-e64806834d3c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T00:29:46,399 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@76708b7c[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:35479, datanodeUuid=71bcd43b-d601-405d-8eb0-235245acf278, infoPort=34369, infoSecurePort=0, ipcPort=45923, storageInfo=lv=-57;cid=testClusterID;nsid=1847167993;c=1731457758228):Failed to transfer BP-155329190-172.17.0.2-1731457758228:blk_1073741857_1040 to 127.0.0.1:33729 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T00:29:46,402 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35783 is added to blk_1073741882_1065 (size=6027) 2024-11-13T00:29:46,681 INFO [regionserver/cb28580903fb:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39667,DS-41660bf4-b5b7-4b75-9656-e64806834d3c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T00:29:47,399 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@78b76bc2[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:35479, datanodeUuid=71bcd43b-d601-405d-8eb0-235245acf278, infoPort=34369, infoSecurePort=0, ipcPort=45923, storageInfo=lv=-57;cid=testClusterID;nsid=1847167993;c=1731457758228):Failed to transfer BP-155329190-172.17.0.2-1731457758228:blk_1073741887_1070 to 127.0.0.1:33729 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T00:29:47,455 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39667,DS-41660bf4-b5b7-4b75-9656-e64806834d3c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T00:29:47,713 INFO [regionserver/cb28580903fb:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39667,DS-41660bf4-b5b7-4b75-9656-e64806834d3c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T00:29:48,681 INFO [regionserver/cb28580903fb:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39667,DS-41660bf4-b5b7-4b75-9656-e64806834d3c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T00:29:49,190 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-13T00:29:49,456 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39667,DS-41660bf4-b5b7-4b75-9656-e64806834d3c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T00:29:49,645 ERROR [FSHLog-0-hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/MasterData-prefix:cb28580903fb,32917,1731457759225 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39667,DS-41660bf4-b5b7-4b75-9656-e64806834d3c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T00:29:49,646 WARN [FSHLog-0-hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/MasterData-prefix:cb28580903fb,32917,1731457759225 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39667,DS-41660bf4-b5b7-4b75-9656-e64806834d3c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T00:29:49,646 DEBUG [master:store-WAL-Roller {}] wal.AbstractWALRoller(198): WAL FSHLog cb28580903fb%2C32917%2C1731457759225:(num 1731457759520) roll requested 2024-11-13T00:29:49,646 INFO [master:store-WAL-Roller {}] monitor.StreamSlowMonitor(122): New stream slow monitor cb28580903fb%2C32917%2C1731457759225.1731457789646 2024-11-13T00:29:49,652 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:29:49,653 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:29:49,653 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:29:49,653 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:29:49,653 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:29:49,653 INFO [master:store-WAL-Roller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/MasterData/WALs/cb28580903fb,32917,1731457759225/cb28580903fb%2C32917%2C1731457759225.1731457759520 with entries=54, filesize=26.67 KB; new WAL /user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/MasterData/WALs/cb28580903fb,32917,1731457759225/cb28580903fb%2C32917%2C1731457759225.1731457789646 2024-11-13T00:29:49,654 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39667,DS-41660bf4-b5b7-4b75-9656-e64806834d3c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T00:29:49,654 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. 
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39667,DS-41660bf4-b5b7-4b75-9656-e64806834d3c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T00:29:49,654 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/MasterData/WALs/cb28580903fb,32917,1731457759225/cb28580903fb%2C32917%2C1731457759225.1731457759520 2024-11-13T00:29:49,654 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35589:35589),(127.0.0.1/127.0.0.1:34369:34369)] 2024-11-13T00:29:49,654 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(879): hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/MasterData/WALs/cb28580903fb,32917,1731457759225/cb28580903fb%2C32917%2C1731457759225.1731457759520 is not closed yet, will try archiving it next time 2024-11-13T00:29:49,655 WARN [IPC Server handler 2 on default port 34283 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/MasterData/WALs/cb28580903fb,32917,1731457759225/cb28580903fb%2C32917%2C1731457759225.1731457759520 has not been closed. Lease recovery is in progress. RecoveryId = 1072 for block blk_1073741830_1014 2024-11-13T00:29:49,655 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/MasterData/WALs/cb28580903fb,32917,1731457759225/cb28580903fb%2C32917%2C1731457759225.1731457759520 after 1ms 2024-11-13T00:29:49,714 INFO [regionserver/cb28580903fb:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39667,DS-41660bf4-b5b7-4b75-9656-e64806834d3c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T00:29:50,681 INFO [regionserver/cb28580903fb:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39667,DS-41660bf4-b5b7-4b75-9656-e64806834d3c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T00:29:51,714 INFO [regionserver/cb28580903fb:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39667,DS-41660bf4-b5b7-4b75-9656-e64806834d3c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T00:29:52,682 INFO [regionserver/cb28580903fb:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39667,DS-41660bf4-b5b7-4b75-9656-e64806834d3c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T00:29:53,656 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/MasterData/WALs/cb28580903fb,32917,1731457759225/cb28580903fb%2C32917%2C1731457759225.1731457759520 after 4002ms 2024-11-13T00:29:53,714 INFO [regionserver/cb28580903fb:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39667,DS-41660bf4-b5b7-4b75-9656-e64806834d3c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-13T00:29:54,067 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@1f33606e {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-155329190-172.17.0.2-1731457758228:blk_1073741833_1009, datanode=DatanodeInfoWithStorage[127.0.0.1:39667,null,null]) java.net.ConnectException: Call From cb28580903fb/172.17.0.2 to localhost:33819 failed on connection exception: java.net.ConnectException: Connection refused; For more details see: http://wiki.apache.org/hadoop/ConnectionRefused at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:876) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:668) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more 2024-11-13T00:29:54,069 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35783 is added to blk_1073741833_1021 (size=455) 2024-11-13T00:29:54,670 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,41233,1731457759323/cb28580903fb%2C41233%2C1731457759323.1731457759850 to hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/oldWALs/cb28580903fb%2C41233%2C1731457759323.1731457759850 2024-11-13T00:29:54,671 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,41233,1731457759323/cb28580903fb%2C41233%2C1731457759323.1731457779674 to hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/oldWALs/cb28580903fb%2C41233%2C1731457759323.1731457779674 2024-11-13T00:29:54,682 INFO [regionserver/cb28580903fb:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39667,DS-41660bf4-b5b7-4b75-9656-e64806834d3c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T00:29:55,047 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@317b5da[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:35783, datanodeUuid=15755521-cc20-4ae7-8d60-99b27f2bc8a1, infoPort=35589, infoSecurePort=0, ipcPort=37549, storageInfo=lv=-57;cid=testClusterID;nsid=1847167993;c=1731457758228):Failed to transfer BP-155329190-172.17.0.2-1731457758228:blk_1073741833_1021 to 127.0.0.1:33729 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-13T00:29:55,715 INFO [regionserver/cb28580903fb:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39667,DS-41660bf4-b5b7-4b75-9656-e64806834d3c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T00:29:56,682 INFO [regionserver/cb28580903fb:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39667,DS-41660bf4-b5b7-4b75-9656-e64806834d3c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T00:29:57,265 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor cb28580903fb%2C41233%2C1731457759323.1731457797265 2024-11-13T00:29:57,267 WARN [Thread-1020 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741889_1073 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T00:29:57,268 WARN [Thread-1020 {}] hdfs.DataStreamer(1731): Error Recovery for BP-155329190-172.17.0.2-1731457758228:blk_1073741889_1073 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39667,DS-41660bf4-b5b7-4b75-9656-e64806834d3c,DISK], DatanodeInfoWithStorage[127.0.0.1:40983,DS-e39e4d54-cf28-41f3-99eb-0e53cf82d9be,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39667,DS-41660bf4-b5b7-4b75-9656-e64806834d3c,DISK]) is bad. 
2024-11-13T00:29:57,268 WARN [Thread-1020 {}] hdfs.DataStreamer(1850): Abandoning BP-155329190-172.17.0.2-1731457758228:blk_1073741889_1073 2024-11-13T00:29:57,268 WARN [Thread-1020 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39667,DS-41660bf4-b5b7-4b75-9656-e64806834d3c,DISK] 2024-11-13T00:29:57,269 WARN [Thread-1020 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741890_1074 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T00:29:57,270 WARN [Thread-1020 {}] hdfs.DataStreamer(1731): Error Recovery for BP-155329190-172.17.0.2-1731457758228:blk_1073741890_1074 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40983,DS-e39e4d54-cf28-41f3-99eb-0e53cf82d9be,DISK], DatanodeInfoWithStorage[127.0.0.1:35479,DS-e751e6f0-c69c-4ed5-91de-2138aa6a5d68,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40983,DS-e39e4d54-cf28-41f3-99eb-0e53cf82d9be,DISK]) is bad. 2024-11-13T00:29:57,270 WARN [Thread-1020 {}] hdfs.DataStreamer(1850): Abandoning BP-155329190-172.17.0.2-1731457758228:blk_1073741890_1074 2024-11-13T00:29:57,270 WARN [Thread-1020 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40983,DS-e39e4d54-cf28-41f3-99eb-0e53cf82d9be,DISK] 2024-11-13T00:29:57,272 WARN [Thread-1020 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741891_1075 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:33729 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
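Above, the DFSClient abandons blk_1073741889_1073 and blk_1073741890_1074 and excludes each unreachable datanode from the write pipeline before retrying. Pipeline-failure handling on a small cluster like this minicluster is usually governed by the client-side replace-datanode-on-failure settings; the snippet below is a hedged example of setting them, with values chosen purely for illustration and not taken from this test's configuration.

import org.apache.hadoop.conf.Configuration;

// Example only: client knobs that control whether a replacement datanode must be
// found when one in the pipeline fails. With best-effort enabled, a write can
// continue on the remaining nodes instead of aborting outright.
public final class PipelineFailurePolicySketch {
  public static Configuration example() {
    Configuration conf = new Configuration();
    conf.set("dfs.client.block.write.replace-datanode-on-failure.policy", "DEFAULT");
    conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.best-effort", true);
    return conf;
  }
}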
2024-11-13T00:29:57,272 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_10793125_22 at /127.0.0.1:54612 [Receiving block BP-155329190-172.17.0.2-1731457758228:blk_1073741891_1075] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bed78662-d1c4-0e5c-044c-1c6cedef83ec/cluster_57ab5bb0-53ef-f515-2c82-6c13ee5c6aca/data/data3, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bed78662-d1c4-0e5c-044c-1c6cedef83ec/cluster_57ab5bb0-53ef-f515-2c82-6c13ee5c6aca/data/data4]'}, localName='127.0.0.1:35783', datanodeUuid='15755521-cc20-4ae7-8d60-99b27f2bc8a1', xmitsInProgress=0}:Exception transferring block BP-155329190-172.17.0.2-1731457758228:blk_1073741891_1075 to mirror 127.0.0.1:33729 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T00:29:57,272 WARN [Thread-1020 {}] hdfs.DataStreamer(1731): Error Recovery for BP-155329190-172.17.0.2-1731457758228:blk_1073741891_1075 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35783,DS-0a2d533a-4bec-41d8-9594-717d0345b537,DISK], DatanodeInfoWithStorage[127.0.0.1:33729,DS-ce41437c-0062-4f42-ab04-40d754994e31,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:33729,DS-ce41437c-0062-4f42-ab04-40d754994e31,DISK]) is bad. 2024-11-13T00:29:57,272 WARN [Thread-1020 {}] hdfs.DataStreamer(1850): Abandoning BP-155329190-172.17.0.2-1731457758228:blk_1073741891_1075 2024-11-13T00:29:57,272 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_10793125_22 at /127.0.0.1:54612 [Receiving block BP-155329190-172.17.0.2-1731457758228:blk_1073741891_1075] {}] datanode.BlockReceiver(316): Block 1073741891 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-13T00:29:57,273 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_10793125_22 at /127.0.0.1:54612 [Receiving block BP-155329190-172.17.0.2-1731457758228:blk_1073741891_1075] {}] datanode.DataXceiver(331): 127.0.0.1:35783:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54612 dst: /127.0.0.1:35783 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T00:29:57,273 WARN [Thread-1020 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33729,DS-ce41437c-0062-4f42-ab04-40d754994e31,DISK] 2024-11-13T00:29:57,277 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:29:57,277 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:29:57,277 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:29:57,277 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:29:57,277 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:29:57,277 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,41233,1731457759323/cb28580903fb%2C41233%2C1731457759323.1731457781691 with entries=13, filesize=12.60 KB; new WAL /user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,41233,1731457759323/cb28580903fb%2C41233%2C1731457759323.1731457797265 2024-11-13T00:29:57,278 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35589:35589),(127.0.0.1/127.0.0.1:34369:34369)] 2024-11-13T00:29:57,278 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,41233,1731457759323/cb28580903fb%2C41233%2C1731457759323.1731457781691 is not closed yet, will try archiving it next time 2024-11-13T00:29:57,279 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35479 is added to blk_1073741877_1060 (size=12911) 2024-11-13T00:29:57,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41233 {}] regionserver.HRegion(8855): Flush requested on dc52a27dfbcfebe0e13c82666f5a1780 2024-11-13T00:29:57,282 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing dc52a27dfbcfebe0e13c82666f5a1780 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-13T00:29:57,287 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/data/default/TestLogRolling-testLogRollOnDatanodeDeath/dc52a27dfbcfebe0e13c82666f5a1780/.tmp/info/c2b4d2f48b224ca1b5c8fd4da2118b2c is 1080, key is row0013/info:/1731457797279/Put/seqid=0 2024-11-13T00:29:57,296 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35783 is added to blk_1073741893_1077 (size=8190) 2024-11-13T00:29:57,296 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35479 is added to blk_1073741893_1077 (size=8190) 2024-11-13T00:29:57,296 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=66 (bloomFilter=true), 
to=hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/data/default/TestLogRolling-testLogRollOnDatanodeDeath/dc52a27dfbcfebe0e13c82666f5a1780/.tmp/info/c2b4d2f48b224ca1b5c8fd4da2118b2c 2024-11-13T00:29:57,303 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/data/default/TestLogRolling-testLogRollOnDatanodeDeath/dc52a27dfbcfebe0e13c82666f5a1780/.tmp/info/c2b4d2f48b224ca1b5c8fd4da2118b2c as hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/data/default/TestLogRolling-testLogRollOnDatanodeDeath/dc52a27dfbcfebe0e13c82666f5a1780/info/c2b4d2f48b224ca1b5c8fd4da2118b2c 2024-11-13T00:29:57,308 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/data/default/TestLogRolling-testLogRollOnDatanodeDeath/dc52a27dfbcfebe0e13c82666f5a1780/info/c2b4d2f48b224ca1b5c8fd4da2118b2c, entries=3, sequenceid=66, filesize=8.0 K 2024-11-13T00:29:57,309 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7527, heapSize ~8.11 KB/8304, currentSize=9.46 KB/9683 for dc52a27dfbcfebe0e13c82666f5a1780 in 27ms, sequenceid=66, compaction requested=false 2024-11-13T00:29:57,309 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for dc52a27dfbcfebe0e13c82666f5a1780: 2024-11-13T00:29:57,309 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=25.7 K, sizeToCheck=16.0 K 2024-11-13T00:29:57,309 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-13T00:29:57,309 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/data/default/TestLogRolling-testLogRollOnDatanodeDeath/dc52a27dfbcfebe0e13c82666f5a1780/info/d492e4b2a05647eca0190de6b1212eda because midkey is the same as first or last row 2024-11-13T00:29:57,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41233 {}] regionserver.HRegion(8855): Flush requested on dc52a27dfbcfebe0e13c82666f5a1780 2024-11-13T00:29:57,500 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing dc52a27dfbcfebe0e13c82666f5a1780 1/1 column families, dataSize=10.51 KB heapSize=11.50 KB 2024-11-13T00:29:57,505 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/data/default/TestLogRolling-testLogRollOnDatanodeDeath/dc52a27dfbcfebe0e13c82666f5a1780/.tmp/info/5f999889e8844e47a11388d562b3018b is 1080, key is row0015/info:/1731457797283/Put/seqid=0 2024-11-13T00:29:57,507 WARN [Thread-1035 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741894_1078 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T00:29:57,507 WARN [Thread-1035 {}] hdfs.DataStreamer(1731): Error Recovery for BP-155329190-172.17.0.2-1731457758228:blk_1073741894_1078 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39667,DS-41660bf4-b5b7-4b75-9656-e64806834d3c,DISK], DatanodeInfoWithStorage[127.0.0.1:40983,DS-e39e4d54-cf28-41f3-99eb-0e53cf82d9be,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39667,DS-41660bf4-b5b7-4b75-9656-e64806834d3c,DISK]) is bad. 2024-11-13T00:29:57,507 WARN [Thread-1035 {}] hdfs.DataStreamer(1850): Abandoning BP-155329190-172.17.0.2-1731457758228:blk_1073741894_1078 2024-11-13T00:29:57,508 WARN [Thread-1035 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39667,DS-41660bf4-b5b7-4b75-9656-e64806834d3c,DISK] 2024-11-13T00:29:57,510 WARN [Thread-1035 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741895_1079 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:40983 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T00:29:57,510 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_202239816_22 at /127.0.0.1:54636 [Receiving block BP-155329190-172.17.0.2-1731457758228:blk_1073741895_1079] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bed78662-d1c4-0e5c-044c-1c6cedef83ec/cluster_57ab5bb0-53ef-f515-2c82-6c13ee5c6aca/data/data3, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bed78662-d1c4-0e5c-044c-1c6cedef83ec/cluster_57ab5bb0-53ef-f515-2c82-6c13ee5c6aca/data/data4]'}, localName='127.0.0.1:35783', datanodeUuid='15755521-cc20-4ae7-8d60-99b27f2bc8a1', xmitsInProgress=0}:Exception transferring block BP-155329190-172.17.0.2-1731457758228:blk_1073741895_1079 to mirror 127.0.0.1:40983 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T00:29:57,510 WARN [Thread-1035 {}] hdfs.DataStreamer(1731): Error Recovery for BP-155329190-172.17.0.2-1731457758228:blk_1073741895_1079 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35783,DS-0a2d533a-4bec-41d8-9594-717d0345b537,DISK], DatanodeInfoWithStorage[127.0.0.1:40983,DS-e39e4d54-cf28-41f3-99eb-0e53cf82d9be,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:40983,DS-e39e4d54-cf28-41f3-99eb-0e53cf82d9be,DISK]) is bad. 2024-11-13T00:29:57,510 WARN [Thread-1035 {}] hdfs.DataStreamer(1850): Abandoning BP-155329190-172.17.0.2-1731457758228:blk_1073741895_1079 2024-11-13T00:29:57,510 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_202239816_22 at /127.0.0.1:54636 [Receiving block BP-155329190-172.17.0.2-1731457758228:blk_1073741895_1079] {}] datanode.BlockReceiver(316): Block 1073741895 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-13T00:29:57,510 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_202239816_22 at /127.0.0.1:54636 [Receiving block BP-155329190-172.17.0.2-1731457758228:blk_1073741895_1079] {}] datanode.DataXceiver(331): 127.0.0.1:35783:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54636 dst: /127.0.0.1:35783 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T00:29:57,511 WARN [Thread-1035 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40983,DS-e39e4d54-cf28-41f3-99eb-0e53cf82d9be,DISK] 2024-11-13T00:29:57,512 WARN [Thread-1035 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741896_1080 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T00:29:57,512 WARN [Thread-1035 {}] hdfs.DataStreamer(1731): Error Recovery for BP-155329190-172.17.0.2-1731457758228:blk_1073741896_1080 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33729,DS-ce41437c-0062-4f42-ab04-40d754994e31,DISK], DatanodeInfoWithStorage[127.0.0.1:35783,DS-0a2d533a-4bec-41d8-9594-717d0345b537,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33729,DS-ce41437c-0062-4f42-ab04-40d754994e31,DISK]) is bad. 2024-11-13T00:29:57,512 WARN [Thread-1035 {}] hdfs.DataStreamer(1850): Abandoning BP-155329190-172.17.0.2-1731457758228:blk_1073741896_1080 2024-11-13T00:29:57,512 WARN [Thread-1035 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33729,DS-ce41437c-0062-4f42-ab04-40d754994e31,DISK] 2024-11-13T00:29:57,517 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35783 is added to blk_1073741897_1081 (size=14660) 2024-11-13T00:29:57,517 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35479 is added to blk_1073741897_1081 (size=14660) 2024-11-13T00:29:57,518 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=10.51 KB at sequenceid=79 (bloomFilter=true), to=hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/data/default/TestLogRolling-testLogRollOnDatanodeDeath/dc52a27dfbcfebe0e13c82666f5a1780/.tmp/info/5f999889e8844e47a11388d562b3018b 2024-11-13T00:29:57,524 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/data/default/TestLogRolling-testLogRollOnDatanodeDeath/dc52a27dfbcfebe0e13c82666f5a1780/.tmp/info/5f999889e8844e47a11388d562b3018b as hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/data/default/TestLogRolling-testLogRollOnDatanodeDeath/dc52a27dfbcfebe0e13c82666f5a1780/info/5f999889e8844e47a11388d562b3018b 2024-11-13T00:29:57,530 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/data/default/TestLogRolling-testLogRollOnDatanodeDeath/dc52a27dfbcfebe0e13c82666f5a1780/info/5f999889e8844e47a11388d562b3018b, entries=9, sequenceid=79, filesize=14.3 K 2024-11-13T00:29:57,531 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~10.51 KB/10758, heapSize ~11.48 KB/11760, currentSize=0 B/0 for dc52a27dfbcfebe0e13c82666f5a1780 in 31ms, sequenceid=79, compaction requested=true 2024-11-13T00:29:57,531 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for dc52a27dfbcfebe0e13c82666f5a1780: 2024-11-13T00:29:57,531 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=40.0 K, sizeToCheck=16.0 K 2024-11-13T00:29:57,531 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 
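The flush above (about 10.51 KB of memstore data written out as 5f999889e8844e47a11388d562b3018b at sequenceid=79) is triggered automatically by the MemStoreFlusher once the region's memstore grows large enough. The same flush can also be requested explicitly through the client Admin API; the table name below is the one appearing in the paths above, while the rest of the scaffolding is a minimal sketch rather than anything this test actually does.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public final class ManualFlushSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Ask the region server to write the current memstore out as a new HFile.
      admin.flush(TableName.valueOf("TestLogRolling-testLogRollOnDatanodeDeath"));
    }
  }
}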
2024-11-13T00:29:57,531 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/data/default/TestLogRolling-testLogRollOnDatanodeDeath/dc52a27dfbcfebe0e13c82666f5a1780/info/d492e4b2a05647eca0190de6b1212eda because midkey is the same as first or last row 2024-11-13T00:29:57,531 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store dc52a27dfbcfebe0e13c82666f5a1780:info, priority=-2147483648, current under compaction store size is 1 2024-11-13T00:29:57,531 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-13T00:29:57,532 DEBUG [RS:0;cb28580903fb:41233-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-13T00:29:57,533 DEBUG [RS:0;cb28580903fb:41233-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 40947 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-13T00:29:57,533 DEBUG [RS:0;cb28580903fb:41233-shortCompactions-0 {}] regionserver.HStore(1541): dc52a27dfbcfebe0e13c82666f5a1780/info is initiating minor compaction (all files) 2024-11-13T00:29:57,533 INFO [RS:0;cb28580903fb:41233-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of dc52a27dfbcfebe0e13c82666f5a1780/info in TestLogRolling-testLogRollOnDatanodeDeath,,1731457760725.dc52a27dfbcfebe0e13c82666f5a1780. 2024-11-13T00:29:57,533 INFO [RS:0;cb28580903fb:41233-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/data/default/TestLogRolling-testLogRollOnDatanodeDeath/dc52a27dfbcfebe0e13c82666f5a1780/info/d492e4b2a05647eca0190de6b1212eda, hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/data/default/TestLogRolling-testLogRollOnDatanodeDeath/dc52a27dfbcfebe0e13c82666f5a1780/info/c2b4d2f48b224ca1b5c8fd4da2118b2c, hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/data/default/TestLogRolling-testLogRollOnDatanodeDeath/dc52a27dfbcfebe0e13c82666f5a1780/info/5f999889e8844e47a11388d562b3018b] into tmpdir=hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/data/default/TestLogRolling-testLogRollOnDatanodeDeath/dc52a27dfbcfebe0e13c82666f5a1780/.tmp, totalSize=40.0 K 2024-11-13T00:29:57,534 DEBUG [RS:0;cb28580903fb:41233-shortCompactions-0 {}] compactions.Compactor(225): Compacting d492e4b2a05647eca0190de6b1212eda, keycount=12, bloomtype=ROW, size=17.7 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1731457773676 2024-11-13T00:29:57,534 DEBUG [RS:0;cb28580903fb:41233-shortCompactions-0 {}] compactions.Compactor(225): Compacting c2b4d2f48b224ca1b5c8fd4da2118b2c, keycount=3, bloomtype=ROW, size=8.0 K, encoding=NONE, compression=NONE, seqNum=66, earliestPutTs=1731457783601 2024-11-13T00:29:57,535 DEBUG [RS:0;cb28580903fb:41233-shortCompactions-0 {}] compactions.Compactor(225): Compacting 5f999889e8844e47a11388d562b3018b, keycount=9, bloomtype=ROW, size=14.3 K, encoding=NONE, compression=NONE, seqNum=79, earliestPutTs=1731457797283 2024-11-13T00:29:57,549 INFO [RS:0;cb28580903fb:41233-shortCompactions-0 {}] 
throttle.PressureAwareThroughputController(145): dc52a27dfbcfebe0e13c82666f5a1780#info#compaction#27 average throughput is 11.29 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-13T00:29:57,550 DEBUG [RS:0;cb28580903fb:41233-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/data/default/TestLogRolling-testLogRollOnDatanodeDeath/dc52a27dfbcfebe0e13c82666f5a1780/.tmp/info/f7e313e00a804c23ac4a2676cf1df1f8 is 1080, key is row0002/info:/1731457773676/Put/seqid=0 2024-11-13T00:29:57,552 WARN [Thread-1044 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741898_1082 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T00:29:57,552 WARN [Thread-1044 {}] hdfs.DataStreamer(1731): Error Recovery for BP-155329190-172.17.0.2-1731457758228:blk_1073741898_1082 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39667,DS-41660bf4-b5b7-4b75-9656-e64806834d3c,DISK], DatanodeInfoWithStorage[127.0.0.1:35783,DS-0a2d533a-4bec-41d8-9594-717d0345b537,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39667,DS-41660bf4-b5b7-4b75-9656-e64806834d3c,DISK]) is bad. 2024-11-13T00:29:57,552 WARN [Thread-1044 {}] hdfs.DataStreamer(1850): Abandoning BP-155329190-172.17.0.2-1731457758228:blk_1073741898_1082 2024-11-13T00:29:57,553 WARN [Thread-1044 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39667,DS-41660bf4-b5b7-4b75-9656-e64806834d3c,DISK] 2024-11-13T00:29:57,554 WARN [Thread-1044 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741899_1083 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-13T00:29:57,554 WARN [Thread-1044 {}] hdfs.DataStreamer(1731): Error Recovery for BP-155329190-172.17.0.2-1731457758228:blk_1073741899_1083 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40983,DS-e39e4d54-cf28-41f3-99eb-0e53cf82d9be,DISK], DatanodeInfoWithStorage[127.0.0.1:33729,DS-ce41437c-0062-4f42-ab04-40d754994e31,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40983,DS-e39e4d54-cf28-41f3-99eb-0e53cf82d9be,DISK]) is bad. 2024-11-13T00:29:57,554 WARN [Thread-1044 {}] hdfs.DataStreamer(1850): Abandoning BP-155329190-172.17.0.2-1731457758228:blk_1073741899_1083 2024-11-13T00:29:57,554 WARN [Thread-1044 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40983,DS-e39e4d54-cf28-41f3-99eb-0e53cf82d9be,DISK] 2024-11-13T00:29:57,559 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35783 is added to blk_1073741900_1084 (size=28989) 2024-11-13T00:29:57,559 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35479 is added to blk_1073741900_1084 (size=28989) 2024-11-13T00:29:57,566 DEBUG [RS:0;cb28580903fb:41233-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/data/default/TestLogRolling-testLogRollOnDatanodeDeath/dc52a27dfbcfebe0e13c82666f5a1780/.tmp/info/f7e313e00a804c23ac4a2676cf1df1f8 as hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/data/default/TestLogRolling-testLogRollOnDatanodeDeath/dc52a27dfbcfebe0e13c82666f5a1780/info/f7e313e00a804c23ac4a2676cf1df1f8 2024-11-13T00:29:57,574 INFO [RS:0;cb28580903fb:41233-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in dc52a27dfbcfebe0e13c82666f5a1780/info of dc52a27dfbcfebe0e13c82666f5a1780 into f7e313e00a804c23ac4a2676cf1df1f8(size=28.3 K), total size for store is 28.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
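The compaction above selects 3 store files totalling 40947 bytes and rewrites them into a single 28.3 K file. The selection point and the blocking limit mentioned in the selection message ("3 eligible, 16 blocking") correspond, as far as I recall, to the standard store-level settings shown below; the values are the usual defaults, given only as an example and not read from this run's configuration.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Sketch of the compaction-related settings that shape the behaviour logged above.
public final class CompactionTuningSketch {
  public static Configuration example() {
    Configuration conf = HBaseConfiguration.create();
    conf.setInt("hbase.hstore.compactionThreshold", 3);  // store files before a minor compaction is considered
    conf.setInt("hbase.hstore.blockingStoreFiles", 16);  // store files before further flushes are blocked
    return conf;
  }
}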
2024-11-13T00:29:57,574 DEBUG [RS:0;cb28580903fb:41233-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for dc52a27dfbcfebe0e13c82666f5a1780: 2024-11-13T00:29:57,574 INFO [RS:0;cb28580903fb:41233-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRollOnDatanodeDeath,,1731457760725.dc52a27dfbcfebe0e13c82666f5a1780., storeName=dc52a27dfbcfebe0e13c82666f5a1780/info, priority=13, startTime=1731457797531; duration=0sec 2024-11-13T00:29:57,574 DEBUG [RS:0;cb28580903fb:41233-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=28.3 K, sizeToCheck=16.0 K 2024-11-13T00:29:57,574 DEBUG [RS:0;cb28580903fb:41233-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-13T00:29:57,574 DEBUG [RS:0;cb28580903fb:41233-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/data/default/TestLogRolling-testLogRollOnDatanodeDeath/dc52a27dfbcfebe0e13c82666f5a1780/info/f7e313e00a804c23ac4a2676cf1df1f8 because midkey is the same as first or last row 2024-11-13T00:29:57,575 DEBUG [RS:0;cb28580903fb:41233-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=28.3 K, sizeToCheck=16.0 K 2024-11-13T00:29:57,575 DEBUG [RS:0;cb28580903fb:41233-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-13T00:29:57,575 DEBUG [RS:0;cb28580903fb:41233-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/data/default/TestLogRolling-testLogRollOnDatanodeDeath/dc52a27dfbcfebe0e13c82666f5a1780/info/f7e313e00a804c23ac4a2676cf1df1f8 because midkey is the same as first or last row 2024-11-13T00:29:57,575 DEBUG [RS:0;cb28580903fb:41233-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=28.3 K, sizeToCheck=16.0 K 2024-11-13T00:29:57,575 DEBUG [RS:0;cb28580903fb:41233-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-13T00:29:57,575 DEBUG [RS:0;cb28580903fb:41233-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/data/default/TestLogRolling-testLogRollOnDatanodeDeath/dc52a27dfbcfebe0e13c82666f5a1780/info/f7e313e00a804c23ac4a2676cf1df1f8 because midkey is the same as first or last row 2024-11-13T00:29:57,575 DEBUG [RS:0;cb28580903fb:41233-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-13T00:29:57,575 DEBUG [RS:0;cb28580903fb:41233-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: dc52a27dfbcfebe0e13c82666f5a1780:info 2024-11-13T00:29:57,680 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,41233,1731457759323/cb28580903fb%2C41233%2C1731457759323.1731457781691 to 
hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/oldWALs/cb28580903fb%2C41233%2C1731457759323.1731457781691 2024-11-13T00:29:57,715 INFO [regionserver/cb28580903fb:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39667,DS-41660bf4-b5b7-4b75-9656-e64806834d3c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T00:29:57,715 INFO [regionserver/cb28580903fb:0.logRoller {}] wal.FSHLog(556): LowReplication-Roller was enabled. 2024-11-13T00:29:57,905 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-13T00:29:57,905 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-13T00:29:57,905 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at 
org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-13T00:29:57,905 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T00:29:57,905 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T00:29:57,905 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-13T00:29:57,906 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-13T00:29:57,906 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1036791079, stopped=false 2024-11-13T00:29:57,906 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=cb28580903fb,32917,1731457759225 2024-11-13T00:29:57,909 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41233-0x1010ebaa2690001, quorum=127.0.0.1:62272, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-13T00:29:57,909 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41233-0x1010ebaa2690001, quorum=127.0.0.1:62272, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T00:29:57,909 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32917-0x1010ebaa2690000, quorum=127.0.0.1:62272, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-13T00:29:57,909 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32917-0x1010ebaa2690000, quorum=127.0.0.1:62272, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T00:29:57,909 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35015-0x1010ebaa2690002, quorum=127.0.0.1:62272, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-13T00:29:57,910 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35015-0x1010ebaa2690002, quorum=127.0.0.1:62272, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T00:29:57,910 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:32917-0x1010ebaa2690000, quorum=127.0.0.1:62272, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-13T00:29:57,910 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:41233-0x1010ebaa2690001, quorum=127.0.0.1:62272, baseZNode=/hbase Set watcher 
on znode that does not yet exist, /hbase/running 2024-11-13T00:29:57,911 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:35015-0x1010ebaa2690002, quorum=127.0.0.1:62272, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-13T00:29:57,912 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-13T00:29:57,913 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-13T00:29:57,913 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at 
org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-13T00:29:57,913 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T00:29:57,913 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'cb28580903fb,41233,1731457759323' ***** 2024-11-13T00:29:57,913 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-13T00:29:57,913 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'cb28580903fb,35015,1731457760593' ***** 2024-11-13T00:29:57,913 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-13T00:29:57,914 INFO [RS:0;cb28580903fb:41233 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-13T00:29:57,914 INFO [RS:1;cb28580903fb:35015 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-13T00:29:57,914 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-13T00:29:57,914 INFO [RS:0;cb28580903fb:41233 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-13T00:29:57,914 INFO [RS:1;cb28580903fb:35015 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-13T00:29:57,914 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-13T00:29:57,914 INFO [RS:0;cb28580903fb:41233 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-13T00:29:57,914 INFO [RS:1;cb28580903fb:35015 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-13T00:29:57,914 INFO [RS:1;cb28580903fb:35015 {}] regionserver.HRegionServer(959): stopping server cb28580903fb,35015,1731457760593 2024-11-13T00:29:57,914 INFO [RS:1;cb28580903fb:35015 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-13T00:29:57,914 INFO [RS:0;cb28580903fb:41233 {}] regionserver.HRegionServer(3091): Received CLOSE for dc52a27dfbcfebe0e13c82666f5a1780 2024-11-13T00:29:57,914 INFO [RS:1;cb28580903fb:35015 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;cb28580903fb:35015. 
2024-11-13T00:29:57,914 DEBUG [RS:1;cb28580903fb:35015 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-13T00:29:57,914 DEBUG [RS:1;cb28580903fb:35015 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T00:29:57,914 INFO [RS:1;cb28580903fb:35015 {}] regionserver.HRegionServer(976): stopping server cb28580903fb,35015,1731457760593; all regions closed. 2024-11-13T00:29:57,915 INFO [RS:0;cb28580903fb:41233 {}] regionserver.HRegionServer(959): stopping server cb28580903fb,41233,1731457759323 2024-11-13T00:29:57,915 INFO [RS:0;cb28580903fb:41233 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-13T00:29:57,915 INFO [RS:0;cb28580903fb:41233 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;cb28580903fb:41233. 
2024-11-13T00:29:57,915 DEBUG [RS:0;cb28580903fb:41233 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-13T00:29:57,915 DEBUG [RS:0;cb28580903fb:41233 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T00:29:57,915 DEBUG [RS_CLOSE_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing dc52a27dfbcfebe0e13c82666f5a1780, disabling compactions & flushes 2024-11-13T00:29:57,915 INFO [RS_CLOSE_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnDatanodeDeath,,1731457760725.dc52a27dfbcfebe0e13c82666f5a1780. 2024-11-13T00:29:57,915 INFO [RS:0;cb28580903fb:41233 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-13T00:29:57,915 INFO [RS:0;cb28580903fb:41233 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-13T00:29:57,915 DEBUG [RS_CLOSE_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1731457760725.dc52a27dfbcfebe0e13c82666f5a1780. 2024-11-13T00:29:57,915 INFO [RS:0;cb28580903fb:41233 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-13T00:29:57,915 DEBUG [RS_CLOSE_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1731457760725.dc52a27dfbcfebe0e13c82666f5a1780. after waiting 0 ms 2024-11-13T00:29:57,915 INFO [RS:0;cb28580903fb:41233 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-13T00:29:57,916 DEBUG [RS_CLOSE_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnDatanodeDeath,,1731457760725.dc52a27dfbcfebe0e13c82666f5a1780. 
2024-11-13T00:29:57,916 INFO [RS_CLOSE_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing dc52a27dfbcfebe0e13c82666f5a1780 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-13T00:29:57,916 INFO [RS:0;cb28580903fb:41233 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-13T00:29:57,916 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:29:57,916 DEBUG [RS:0;cb28580903fb:41233 {}] regionserver.HRegionServer(1325): Online Regions={dc52a27dfbcfebe0e13c82666f5a1780=TestLogRolling-testLogRollOnDatanodeDeath,,1731457760725.dc52a27dfbcfebe0e13c82666f5a1780., 1588230740=hbase:meta,,1.1588230740} 2024-11-13T00:29:57,916 DEBUG [RS:0;cb28580903fb:41233 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, dc52a27dfbcfebe0e13c82666f5a1780 2024-11-13T00:29:57,917 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:29:57,917 DEBUG [RS_CLOSE_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-13T00:29:57,917 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:29:57,917 INFO [RS_CLOSE_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-13T00:29:57,917 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:29:57,917 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:29:57,918 DEBUG [RS_CLOSE_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-13T00:29:57,918 DEBUG [RS_CLOSE_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-13T00:29:57,918 DEBUG [RS_CLOSE_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-13T00:29:57,918 INFO [RS_CLOSE_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.71 KB heapSize=3.75 KB 2024-11-13T00:29:57,918 ERROR [FSHLog-0-hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79-prefix:cb28580903fb,41233,1731457759323.meta {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39667,DS-41660bf4-b5b7-4b75-9656-e64806834d3c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T00:29:57,918 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... 
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39667,DS-41660bf4-b5b7-4b75-9656-e64806834d3c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T00:29:57,919 WARN [FSHLog-0-hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79-prefix:cb28580903fb,41233,1731457759323.meta {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39667,DS-41660bf4-b5b7-4b75-9656-e64806834d3c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T00:29:57,919 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39667,DS-41660bf4-b5b7-4b75-9656-e64806834d3c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T00:29:57,919 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,35015,1731457760593/cb28580903fb%2C35015%2C1731457760593.1731457760827 2024-11-13T00:29:57,919 DEBUG [regionserver/cb28580903fb:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog cb28580903fb%2C41233%2C1731457759323.meta:.meta(num 1731457760430) roll requested 2024-11-13T00:29:57,919 INFO [regionserver/cb28580903fb:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor cb28580903fb%2C41233%2C1731457759323.meta.1731457797919.meta 2024-11-13T00:29:57,919 WARN [IPC Server handler 1 on default port 34283 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,35015,1731457760593/cb28580903fb%2C35015%2C1731457760593.1731457760827 has not been closed. Lease recovery is in progress. 
RecoveryId = 1085 for block blk_1073741837_1013 2024-11-13T00:29:57,920 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,35015,1731457760593/cb28580903fb%2C35015%2C1731457760593.1731457760827 after 0ms 2024-11-13T00:29:57,922 WARN [Thread-1050 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741901_1086 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T00:29:57,922 WARN [Thread-1050 {}] hdfs.DataStreamer(1731): Error Recovery for BP-155329190-172.17.0.2-1731457758228:blk_1073741901_1086 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40983,DS-e39e4d54-cf28-41f3-99eb-0e53cf82d9be,DISK], DatanodeInfoWithStorage[127.0.0.1:35479,DS-e751e6f0-c69c-4ed5-91de-2138aa6a5d68,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40983,DS-e39e4d54-cf28-41f3-99eb-0e53cf82d9be,DISK]) is bad. 
2024-11-13T00:29:57,922 WARN [Thread-1050 {}] hdfs.DataStreamer(1850): Abandoning BP-155329190-172.17.0.2-1731457758228:blk_1073741901_1086 2024-11-13T00:29:57,923 WARN [Thread-1050 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40983,DS-e39e4d54-cf28-41f3-99eb-0e53cf82d9be,DISK] 2024-11-13T00:29:57,926 DEBUG [RS_CLOSE_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/data/default/TestLogRolling-testLogRollOnDatanodeDeath/dc52a27dfbcfebe0e13c82666f5a1780/.tmp/info/7b8e0ecdef4a4187a01a78149efdda73 is 1079, key is tmprow/info:/1731457797702/Put/seqid=0 2024-11-13T00:29:57,927 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:29:57,927 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:29:57,927 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:29:57,928 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:29:57,928 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:29:57,928 INFO [regionserver/cb28580903fb:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,41233,1731457759323/cb28580903fb%2C41233%2C1731457759323.meta.1731457760430.meta with entries=8, filesize=2.33 KB; new WAL /user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,41233,1731457759323/cb28580903fb%2C41233%2C1731457759323.meta.1731457797919.meta 2024-11-13T00:29:57,929 WARN [Thread-1051 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741903_1088 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:33729 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T00:29:57,929 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_202239816_22 at /127.0.0.1:54672 [Receiving block BP-155329190-172.17.0.2-1731457758228:blk_1073741903_1088] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bed78662-d1c4-0e5c-044c-1c6cedef83ec/cluster_57ab5bb0-53ef-f515-2c82-6c13ee5c6aca/data/data3, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bed78662-d1c4-0e5c-044c-1c6cedef83ec/cluster_57ab5bb0-53ef-f515-2c82-6c13ee5c6aca/data/data4]'}, localName='127.0.0.1:35783', datanodeUuid='15755521-cc20-4ae7-8d60-99b27f2bc8a1', xmitsInProgress=0}:Exception transferring block BP-155329190-172.17.0.2-1731457758228:blk_1073741903_1088 to mirror 127.0.0.1:33729 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] 
at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T00:29:57,929 WARN [Thread-1051 {}] hdfs.DataStreamer(1731): Error Recovery for BP-155329190-172.17.0.2-1731457758228:blk_1073741903_1088 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35783,DS-0a2d533a-4bec-41d8-9594-717d0345b537,DISK], DatanodeInfoWithStorage[127.0.0.1:33729,DS-ce41437c-0062-4f42-ab04-40d754994e31,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:33729,DS-ce41437c-0062-4f42-ab04-40d754994e31,DISK]) is bad. 2024-11-13T00:29:57,929 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_202239816_22 at /127.0.0.1:54672 [Receiving block BP-155329190-172.17.0.2-1731457758228:blk_1073741903_1088] {}] datanode.BlockReceiver(316): Block 1073741903 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-13T00:29:57,929 WARN [Thread-1051 {}] hdfs.DataStreamer(1850): Abandoning BP-155329190-172.17.0.2-1731457758228:blk_1073741903_1088 2024-11-13T00:29:57,929 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_202239816_22 at /127.0.0.1:54672 [Receiving block BP-155329190-172.17.0.2-1731457758228:blk_1073741903_1088] {}] datanode.DataXceiver(331): 127.0.0.1:35783:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54672 dst: /127.0.0.1:35783 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T00:29:57,930 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39667,DS-41660bf4-b5b7-4b75-9656-e64806834d3c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T00:29:57,930 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:39667,DS-41660bf4-b5b7-4b75-9656-e64806834d3c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T00:29:57,930 WARN [Thread-1051 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33729,DS-ce41437c-0062-4f42-ab04-40d754994e31,DISK] 2024-11-13T00:29:57,930 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,41233,1731457759323/cb28580903fb%2C41233%2C1731457759323.meta.1731457760430.meta 2024-11-13T00:29:57,930 WARN [IPC Server handler 2 on default port 34283 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,41233,1731457759323/cb28580903fb%2C41233%2C1731457759323.meta.1731457760430.meta has not been closed. Lease recovery is in progress. RecoveryId = 1089 for block blk_1073741834_1010 2024-11-13T00:29:57,931 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,41233,1731457759323/cb28580903fb%2C41233%2C1731457759323.meta.1731457760430.meta after 1ms 2024-11-13T00:29:57,931 WARN [Thread-1051 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741904_1090 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-13T00:29:57,931 WARN [Thread-1051 {}] hdfs.DataStreamer(1731): Error Recovery for BP-155329190-172.17.0.2-1731457758228:blk_1073741904_1090 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40983,DS-e39e4d54-cf28-41f3-99eb-0e53cf82d9be,DISK], DatanodeInfoWithStorage[127.0.0.1:35783,DS-0a2d533a-4bec-41d8-9594-717d0345b537,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40983,DS-e39e4d54-cf28-41f3-99eb-0e53cf82d9be,DISK]) is bad. 2024-11-13T00:29:57,932 WARN [Thread-1051 {}] hdfs.DataStreamer(1850): Abandoning BP-155329190-172.17.0.2-1731457758228:blk_1073741904_1090 2024-11-13T00:29:57,932 WARN [Thread-1051 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40983,DS-e39e4d54-cf28-41f3-99eb-0e53cf82d9be,DISK] 2024-11-13T00:29:57,941 DEBUG [regionserver/cb28580903fb:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34369:34369),(127.0.0.1/127.0.0.1:35589:35589)] 2024-11-13T00:29:57,941 DEBUG [regionserver/cb28580903fb:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,41233,1731457759323/cb28580903fb%2C41233%2C1731457759323.meta.1731457760430.meta is not closed yet, will try archiving it next time 2024-11-13T00:29:57,959 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35479 is added to blk_1073741905_1091 (size=6027) 2024-11-13T00:29:57,960 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35783 is added to blk_1073741905_1091 (size=6027) 2024-11-13T00:29:57,963 INFO [RS_CLOSE_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=84 (bloomFilter=true), to=hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/data/default/TestLogRolling-testLogRollOnDatanodeDeath/dc52a27dfbcfebe0e13c82666f5a1780/.tmp/info/7b8e0ecdef4a4187a01a78149efdda73 2024-11-13T00:29:57,971 DEBUG [RS_CLOSE_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/data/default/TestLogRolling-testLogRollOnDatanodeDeath/dc52a27dfbcfebe0e13c82666f5a1780/.tmp/info/7b8e0ecdef4a4187a01a78149efdda73 as hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/data/default/TestLogRolling-testLogRollOnDatanodeDeath/dc52a27dfbcfebe0e13c82666f5a1780/info/7b8e0ecdef4a4187a01a78149efdda73 2024-11-13T00:29:57,979 DEBUG [RS_CLOSE_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/data/hbase/meta/1588230740/.tmp/info/55b478cb8b2f4ddca7731f687d8e6da0 is 203, key is TestLogRolling-testLogRollOnDatanodeDeath,,1731457760725.dc52a27dfbcfebe0e13c82666f5a1780./info:regioninfo/1731457761108/Put/seqid=0 2024-11-13T00:29:57,981 INFO [RS_CLOSE_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/data/default/TestLogRolling-testLogRollOnDatanodeDeath/dc52a27dfbcfebe0e13c82666f5a1780/info/7b8e0ecdef4a4187a01a78149efdda73, entries=1, sequenceid=84, filesize=5.9 K 
2024-11-13T00:29:57,982 WARN [Thread-1060 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741906_1092 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T00:29:57,982 WARN [Thread-1060 {}] hdfs.DataStreamer(1731): Error Recovery for BP-155329190-172.17.0.2-1731457758228:blk_1073741906_1092 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33729,DS-ce41437c-0062-4f42-ab04-40d754994e31,DISK], DatanodeInfoWithStorage[127.0.0.1:35479,DS-e751e6f0-c69c-4ed5-91de-2138aa6a5d68,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33729,DS-ce41437c-0062-4f42-ab04-40d754994e31,DISK]) is bad. 2024-11-13T00:29:57,982 WARN [Thread-1060 {}] hdfs.DataStreamer(1850): Abandoning BP-155329190-172.17.0.2-1731457758228:blk_1073741906_1092 2024-11-13T00:29:57,983 INFO [RS_CLOSE_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1075, heapSize ~1.36 KB/1392, currentSize=0 B/0 for dc52a27dfbcfebe0e13c82666f5a1780 in 67ms, sequenceid=84, compaction requested=false 2024-11-13T00:29:57,983 WARN [Thread-1060 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33729,DS-ce41437c-0062-4f42-ab04-40d754994e31,DISK] 2024-11-13T00:29:57,983 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731457760725.dc52a27dfbcfebe0e13c82666f5a1780.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/data/default/TestLogRolling-testLogRollOnDatanodeDeath/dc52a27dfbcfebe0e13c82666f5a1780/info/fb255967502f496c86a4cf073e29de33, hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/data/default/TestLogRolling-testLogRollOnDatanodeDeath/dc52a27dfbcfebe0e13c82666f5a1780/info/be7107dcf0074c4696513de34dd08e81, hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/data/default/TestLogRolling-testLogRollOnDatanodeDeath/dc52a27dfbcfebe0e13c82666f5a1780/info/520eb4e18cb84176a4a6d232618f3798, hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/data/default/TestLogRolling-testLogRollOnDatanodeDeath/dc52a27dfbcfebe0e13c82666f5a1780/info/33c2781e4d064cbd98855f5e8824dd64, hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/data/default/TestLogRolling-testLogRollOnDatanodeDeath/dc52a27dfbcfebe0e13c82666f5a1780/info/8b32eb0b6b79481ebafb567520170af4, hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/data/default/TestLogRolling-testLogRollOnDatanodeDeath/dc52a27dfbcfebe0e13c82666f5a1780/info/d492e4b2a05647eca0190de6b1212eda, 
hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/data/default/TestLogRolling-testLogRollOnDatanodeDeath/dc52a27dfbcfebe0e13c82666f5a1780/info/a7e49eac632c4ac0b9dc24fae9c1fb65, hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/data/default/TestLogRolling-testLogRollOnDatanodeDeath/dc52a27dfbcfebe0e13c82666f5a1780/info/c2b4d2f48b224ca1b5c8fd4da2118b2c, hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/data/default/TestLogRolling-testLogRollOnDatanodeDeath/dc52a27dfbcfebe0e13c82666f5a1780/info/5f999889e8844e47a11388d562b3018b] to archive 2024-11-13T00:29:57,986 WARN [Thread-1060 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741907_1093 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:40983 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T00:29:57,986 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_202239816_22 at /127.0.0.1:45234 [Receiving block BP-155329190-172.17.0.2-1731457758228:blk_1073741907_1093] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bed78662-d1c4-0e5c-044c-1c6cedef83ec/cluster_57ab5bb0-53ef-f515-2c82-6c13ee5c6aca/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bed78662-d1c4-0e5c-044c-1c6cedef83ec/cluster_57ab5bb0-53ef-f515-2c82-6c13ee5c6aca/data/data10]'}, localName='127.0.0.1:35479', datanodeUuid='71bcd43b-d601-405d-8eb0-235245acf278', xmitsInProgress=0}:Exception transferring block BP-155329190-172.17.0.2-1731457758228:blk_1073741907_1093 to mirror 127.0.0.1:40983 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-13T00:29:57,986 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731457760725.dc52a27dfbcfebe0e13c82666f5a1780.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-13T00:29:57,986 WARN [Thread-1060 {}] hdfs.DataStreamer(1731): Error Recovery for BP-155329190-172.17.0.2-1731457758228:blk_1073741907_1093 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35479,DS-e751e6f0-c69c-4ed5-91de-2138aa6a5d68,DISK], DatanodeInfoWithStorage[127.0.0.1:40983,DS-e39e4d54-cf28-41f3-99eb-0e53cf82d9be,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:40983,DS-e39e4d54-cf28-41f3-99eb-0e53cf82d9be,DISK]) is bad. 2024-11-13T00:29:57,986 WARN [Thread-1060 {}] hdfs.DataStreamer(1850): Abandoning BP-155329190-172.17.0.2-1731457758228:blk_1073741907_1093 2024-11-13T00:29:57,986 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_202239816_22 at /127.0.0.1:45234 [Receiving block BP-155329190-172.17.0.2-1731457758228:blk_1073741907_1093] {}] datanode.BlockReceiver(316): Block 1073741907 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-13T00:29:57,986 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_202239816_22 at /127.0.0.1:45234 [Receiving block BP-155329190-172.17.0.2-1731457758228:blk_1073741907_1093] {}] datanode.DataXceiver(331): 127.0.0.1:35479:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:45234 dst: /127.0.0.1:35479 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-13T00:29:57,987 WARN [Thread-1060 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40983,DS-e39e4d54-cf28-41f3-99eb-0e53cf82d9be,DISK] 2024-11-13T00:29:57,989 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731457760725.dc52a27dfbcfebe0e13c82666f5a1780.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/data/default/TestLogRolling-testLogRollOnDatanodeDeath/dc52a27dfbcfebe0e13c82666f5a1780/info/fb255967502f496c86a4cf073e29de33 to hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/dc52a27dfbcfebe0e13c82666f5a1780/info/fb255967502f496c86a4cf073e29de33 2024-11-13T00:29:57,992 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731457760725.dc52a27dfbcfebe0e13c82666f5a1780.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/data/default/TestLogRolling-testLogRollOnDatanodeDeath/dc52a27dfbcfebe0e13c82666f5a1780/info/be7107dcf0074c4696513de34dd08e81 to hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/dc52a27dfbcfebe0e13c82666f5a1780/info/be7107dcf0074c4696513de34dd08e81 2024-11-13T00:29:57,994 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731457760725.dc52a27dfbcfebe0e13c82666f5a1780.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/data/default/TestLogRolling-testLogRollOnDatanodeDeath/dc52a27dfbcfebe0e13c82666f5a1780/info/520eb4e18cb84176a4a6d232618f3798 to hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/dc52a27dfbcfebe0e13c82666f5a1780/info/520eb4e18cb84176a4a6d232618f3798 2024-11-13T00:29:57,996 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731457760725.dc52a27dfbcfebe0e13c82666f5a1780.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/data/default/TestLogRolling-testLogRollOnDatanodeDeath/dc52a27dfbcfebe0e13c82666f5a1780/info/33c2781e4d064cbd98855f5e8824dd64 to hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/dc52a27dfbcfebe0e13c82666f5a1780/info/33c2781e4d064cbd98855f5e8824dd64 2024-11-13T00:29:57,998 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731457760725.dc52a27dfbcfebe0e13c82666f5a1780.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/data/default/TestLogRolling-testLogRollOnDatanodeDeath/dc52a27dfbcfebe0e13c82666f5a1780/info/8b32eb0b6b79481ebafb567520170af4 to hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/dc52a27dfbcfebe0e13c82666f5a1780/info/8b32eb0b6b79481ebafb567520170af4 2024-11-13T00:29:57,999 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731457760725.dc52a27dfbcfebe0e13c82666f5a1780.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/data/default/TestLogRolling-testLogRollOnDatanodeDeath/dc52a27dfbcfebe0e13c82666f5a1780/info/d492e4b2a05647eca0190de6b1212eda to hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/dc52a27dfbcfebe0e13c82666f5a1780/info/d492e4b2a05647eca0190de6b1212eda 2024-11-13T00:29:58,001 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731457760725.dc52a27dfbcfebe0e13c82666f5a1780.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/data/default/TestLogRolling-testLogRollOnDatanodeDeath/dc52a27dfbcfebe0e13c82666f5a1780/info/a7e49eac632c4ac0b9dc24fae9c1fb65 to hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/dc52a27dfbcfebe0e13c82666f5a1780/info/a7e49eac632c4ac0b9dc24fae9c1fb65 2024-11-13T00:29:58,003 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731457760725.dc52a27dfbcfebe0e13c82666f5a1780.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/data/default/TestLogRolling-testLogRollOnDatanodeDeath/dc52a27dfbcfebe0e13c82666f5a1780/info/c2b4d2f48b224ca1b5c8fd4da2118b2c to hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/dc52a27dfbcfebe0e13c82666f5a1780/info/c2b4d2f48b224ca1b5c8fd4da2118b2c 2024-11-13T00:29:58,005 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731457760725.dc52a27dfbcfebe0e13c82666f5a1780.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/data/default/TestLogRolling-testLogRollOnDatanodeDeath/dc52a27dfbcfebe0e13c82666f5a1780/info/5f999889e8844e47a11388d562b3018b to hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/dc52a27dfbcfebe0e13c82666f5a1780/info/5f999889e8844e47a11388d562b3018b 2024-11-13T00:29:58,006 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731457760725.dc52a27dfbcfebe0e13c82666f5a1780.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=cb28580903fb:32917 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] 
at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 
16 more 2024-11-13T00:29:58,006 WARN [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731457760725.dc52a27dfbcfebe0e13c82666f5a1780.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [fb255967502f496c86a4cf073e29de33=10347, be7107dcf0074c4696513de34dd08e81=12506, 520eb4e18cb84176a4a6d232618f3798=17994, 33c2781e4d064cbd98855f5e8824dd64=6027, 8b32eb0b6b79481ebafb567520170af4=6027, d492e4b2a05647eca0190de6b1212eda=18097, a7e49eac632c4ac0b9dc24fae9c1fb65=6027, c2b4d2f48b224ca1b5c8fd4da2118b2c=8190, 5f999889e8844e47a11388d562b3018b=14660] 2024-11-13T00:29:58,009 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35479 is added to blk_1073741908_1094 (size=7089) 2024-11-13T00:29:58,009 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35783 is added to blk_1073741908_1094 (size=7089) 2024-11-13T00:29:58,010 INFO [RS_CLOSE_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.50 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/data/hbase/meta/1588230740/.tmp/info/55b478cb8b2f4ddca7731f687d8e6da0 2024-11-13T00:29:58,013 DEBUG [RS_CLOSE_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/data/default/TestLogRolling-testLogRollOnDatanodeDeath/dc52a27dfbcfebe0e13c82666f5a1780/recovered.edits/87.seqid, newMaxSeqId=87, maxSeqId=1 2024-11-13T00:29:58,014 INFO [RS_CLOSE_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1731457760725.dc52a27dfbcfebe0e13c82666f5a1780. 2024-11-13T00:29:58,014 DEBUG [RS_CLOSE_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for dc52a27dfbcfebe0e13c82666f5a1780: Waiting for close lock at 1731457797915Running coprocessor pre-close hooks at 1731457797915Disabling compacts and flushes for region at 1731457797915Disabling writes for close at 1731457797915Obtaining lock to block concurrent updates at 1731457797916 (+1 ms)Preparing flush snapshotting stores in dc52a27dfbcfebe0e13c82666f5a1780 at 1731457797916Finished memstore snapshotting TestLogRolling-testLogRollOnDatanodeDeath,,1731457760725.dc52a27dfbcfebe0e13c82666f5a1780., syncing WAL and waiting on mvcc, flushsize=dataSize=1075, getHeapSize=1392, getOffHeapSize=0, getCellsCount=1 at 1731457797916Flushing stores of TestLogRolling-testLogRollOnDatanodeDeath,,1731457760725.dc52a27dfbcfebe0e13c82666f5a1780. 
at 1731457797918 (+2 ms)Flushing dc52a27dfbcfebe0e13c82666f5a1780/info: creating writer at 1731457797918Flushing dc52a27dfbcfebe0e13c82666f5a1780/info: appending metadata at 1731457797925 (+7 ms)Flushing dc52a27dfbcfebe0e13c82666f5a1780/info: closing flushed file at 1731457797925Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1c420089: reopening flushed file at 1731457797971 (+46 ms)Finished flush of dataSize ~1.05 KB/1075, heapSize ~1.36 KB/1392, currentSize=0 B/0 for dc52a27dfbcfebe0e13c82666f5a1780 in 67ms, sequenceid=84, compaction requested=false at 1731457797983 (+12 ms)Writing region close event to WAL at 1731457798007 (+24 ms)Running coprocessor post-close hooks at 1731457798014 (+7 ms)Closed at 1731457798014 2024-11-13T00:29:58,014 DEBUG [RS_CLOSE_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1731457760725.dc52a27dfbcfebe0e13c82666f5a1780. 2024-11-13T00:29:58,034 DEBUG [RS_CLOSE_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/data/hbase/meta/1588230740/.tmp/ns/95a401acdf3441bcb851caa3a9f93342 is 43, key is default/ns:d/1731457760493/Put/seqid=0 2024-11-13T00:29:58,037 WARN [Thread-1071 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741909_1095 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:33729 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T00:29:58,038 WARN [Thread-1071 {}] hdfs.DataStreamer(1731): Error Recovery for BP-155329190-172.17.0.2-1731457758228:blk_1073741909_1095 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35783,DS-0a2d533a-4bec-41d8-9594-717d0345b537,DISK], DatanodeInfoWithStorage[127.0.0.1:33729,DS-ce41437c-0062-4f42-ab04-40d754994e31,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:33729,DS-ce41437c-0062-4f42-ab04-40d754994e31,DISK]) is bad. 
2024-11-13T00:29:58,038 WARN [Thread-1071 {}] hdfs.DataStreamer(1850): Abandoning BP-155329190-172.17.0.2-1731457758228:blk_1073741909_1095 2024-11-13T00:29:58,037 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_202239816_22 at /127.0.0.1:54692 [Receiving block BP-155329190-172.17.0.2-1731457758228:blk_1073741909_1095] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bed78662-d1c4-0e5c-044c-1c6cedef83ec/cluster_57ab5bb0-53ef-f515-2c82-6c13ee5c6aca/data/data3, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bed78662-d1c4-0e5c-044c-1c6cedef83ec/cluster_57ab5bb0-53ef-f515-2c82-6c13ee5c6aca/data/data4]'}, localName='127.0.0.1:35783', datanodeUuid='15755521-cc20-4ae7-8d60-99b27f2bc8a1', xmitsInProgress=0}:Exception transferring block BP-155329190-172.17.0.2-1731457758228:blk_1073741909_1095 to mirror 127.0.0.1:33729 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T00:29:58,038 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_202239816_22 at /127.0.0.1:54692 [Receiving block BP-155329190-172.17.0.2-1731457758228:blk_1073741909_1095] {}] datanode.BlockReceiver(316): Block 1073741909 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-13T00:29:58,038 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_202239816_22 at /127.0.0.1:54692 [Receiving block BP-155329190-172.17.0.2-1731457758228:blk_1073741909_1095] {}] datanode.DataXceiver(331): 127.0.0.1:35783:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54692 dst: /127.0.0.1:35783 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T00:29:58,038 WARN [Thread-1071 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33729,DS-ce41437c-0062-4f42-ab04-40d754994e31,DISK] 2024-11-13T00:29:58,040 WARN [Thread-1071 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741910_1096 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T00:29:58,040 WARN [Thread-1071 {}] hdfs.DataStreamer(1731): Error Recovery for BP-155329190-172.17.0.2-1731457758228:blk_1073741910_1096 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40983,DS-e39e4d54-cf28-41f3-99eb-0e53cf82d9be,DISK], DatanodeInfoWithStorage[127.0.0.1:35783,DS-0a2d533a-4bec-41d8-9594-717d0345b537,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40983,DS-e39e4d54-cf28-41f3-99eb-0e53cf82d9be,DISK]) is bad. 2024-11-13T00:29:58,040 WARN [Thread-1071 {}] hdfs.DataStreamer(1850): Abandoning BP-155329190-172.17.0.2-1731457758228:blk_1073741910_1096 2024-11-13T00:29:58,040 WARN [Thread-1071 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40983,DS-e39e4d54-cf28-41f3-99eb-0e53cf82d9be,DISK] 2024-11-13T00:29:58,050 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35479 is added to blk_1073741911_1097 (size=5153) 2024-11-13T00:29:58,050 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35783 is added to blk_1073741911_1097 (size=5153) 2024-11-13T00:29:58,117 DEBUG [RS:0;cb28580903fb:41233 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-13T00:29:58,317 DEBUG [RS:0;cb28580903fb:41233 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-13T00:29:58,399 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@78b76bc2[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:35479, datanodeUuid=71bcd43b-d601-405d-8eb0-235245acf278, infoPort=34369, infoSecurePort=0, ipcPort=45923, storageInfo=lv=-57;cid=testClusterID;nsid=1847167993;c=1731457758228):Failed to transfer BP-155329190-172.17.0.2-1731457758228:blk_1073741877_1060 to 127.0.0.1:33729 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T00:29:58,451 INFO [RS_CLOSE_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/data/hbase/meta/1588230740/.tmp/ns/95a401acdf3441bcb851caa3a9f93342 2024-11-13T00:29:58,473 DEBUG [RS_CLOSE_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/data/hbase/meta/1588230740/.tmp/table/b43cf9fe984d4613bbee288ac2935223 is 77, key is TestLogRolling-testLogRollOnDatanodeDeath/table:state/1731457761125/Put/seqid=0 2024-11-13T00:29:58,476 WARN [Thread-1080 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741912_1098 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:39667 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T00:29:58,476 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_202239816_22 at /127.0.0.1:45258 [Receiving block BP-155329190-172.17.0.2-1731457758228:blk_1073741912_1098] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bed78662-d1c4-0e5c-044c-1c6cedef83ec/cluster_57ab5bb0-53ef-f515-2c82-6c13ee5c6aca/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bed78662-d1c4-0e5c-044c-1c6cedef83ec/cluster_57ab5bb0-53ef-f515-2c82-6c13ee5c6aca/data/data10]'}, localName='127.0.0.1:35479', datanodeUuid='71bcd43b-d601-405d-8eb0-235245acf278', xmitsInProgress=0}:Exception transferring block BP-155329190-172.17.0.2-1731457758228:blk_1073741912_1098 to mirror 127.0.0.1:39667 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T00:29:58,476 WARN [Thread-1080 {}] hdfs.DataStreamer(1731): Error Recovery for BP-155329190-172.17.0.2-1731457758228:blk_1073741912_1098 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35479,DS-e751e6f0-c69c-4ed5-91de-2138aa6a5d68,DISK], DatanodeInfoWithStorage[127.0.0.1:39667,DS-41660bf4-b5b7-4b75-9656-e64806834d3c,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:39667,DS-41660bf4-b5b7-4b75-9656-e64806834d3c,DISK]) is bad. 2024-11-13T00:29:58,476 WARN [Thread-1080 {}] hdfs.DataStreamer(1850): Abandoning BP-155329190-172.17.0.2-1731457758228:blk_1073741912_1098 2024-11-13T00:29:58,476 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_202239816_22 at /127.0.0.1:45258 [Receiving block BP-155329190-172.17.0.2-1731457758228:blk_1073741912_1098] {}] datanode.BlockReceiver(316): Block 1073741912 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-13T00:29:58,476 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_202239816_22 at /127.0.0.1:45258 [Receiving block BP-155329190-172.17.0.2-1731457758228:blk_1073741912_1098] {}] datanode.DataXceiver(331): 127.0.0.1:35479:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:45258 dst: /127.0.0.1:35479 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-13T00:29:58,477 WARN [Thread-1080 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39667,DS-41660bf4-b5b7-4b75-9656-e64806834d3c,DISK] 2024-11-13T00:29:58,482 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35783 is added to blk_1073741913_1099 (size=5424) 2024-11-13T00:29:58,482 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35479 is added to blk_1073741913_1099 (size=5424) 2024-11-13T00:29:58,483 INFO [RS_CLOSE_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=146 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/data/hbase/meta/1588230740/.tmp/table/b43cf9fe984d4613bbee288ac2935223 2024-11-13T00:29:58,489 DEBUG [RS_CLOSE_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/data/hbase/meta/1588230740/.tmp/info/55b478cb8b2f4ddca7731f687d8e6da0 as hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/data/hbase/meta/1588230740/info/55b478cb8b2f4ddca7731f687d8e6da0 2024-11-13T00:29:58,495 INFO [RS_CLOSE_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/data/hbase/meta/1588230740/info/55b478cb8b2f4ddca7731f687d8e6da0, entries=10, sequenceid=11, filesize=6.9 K 2024-11-13T00:29:58,496 DEBUG [RS_CLOSE_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/data/hbase/meta/1588230740/.tmp/ns/95a401acdf3441bcb851caa3a9f93342 as hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/data/hbase/meta/1588230740/ns/95a401acdf3441bcb851caa3a9f93342 2024-11-13T00:29:58,501 INFO [RS_CLOSE_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/data/hbase/meta/1588230740/ns/95a401acdf3441bcb851caa3a9f93342, entries=2, sequenceid=11, filesize=5.0 K 2024-11-13T00:29:58,502 DEBUG [RS_CLOSE_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/data/hbase/meta/1588230740/.tmp/table/b43cf9fe984d4613bbee288ac2935223 as hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/data/hbase/meta/1588230740/table/b43cf9fe984d4613bbee288ac2935223 2024-11-13T00:29:58,507 INFO [RS_CLOSE_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/data/hbase/meta/1588230740/table/b43cf9fe984d4613bbee288ac2935223, entries=2, sequenceid=11, filesize=5.3 K 2024-11-13T00:29:58,509 INFO [RS_CLOSE_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.71 KB/1752, heapSize ~3.45 KB/3536, currentSize=0 B/0 for 1588230740 in 591ms, sequenceid=11, 
compaction requested=false 2024-11-13T00:29:58,513 DEBUG [RS_CLOSE_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-13T00:29:58,514 DEBUG [RS_CLOSE_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-13T00:29:58,514 INFO [RS_CLOSE_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-13T00:29:58,514 DEBUG [RS_CLOSE_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731457797917Running coprocessor pre-close hooks at 1731457797917Disabling compacts and flushes for region at 1731457797917Disabling writes for close at 1731457797918 (+1 ms)Obtaining lock to block concurrent updates at 1731457797918Preparing flush snapshotting stores in 1588230740 at 1731457797918Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1752, getHeapSize=3776, getOffHeapSize=0, getCellsCount=14 at 1731457797919 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1731457797942 (+23 ms)Flushing 1588230740/info: creating writer at 1731457797942Flushing 1588230740/info: appending metadata at 1731457797979 (+37 ms)Flushing 1588230740/info: closing flushed file at 1731457797979Flushing 1588230740/ns: creating writer at 1731457798017 (+38 ms)Flushing 1588230740/ns: appending metadata at 1731457798034 (+17 ms)Flushing 1588230740/ns: closing flushed file at 1731457798034Flushing 1588230740/table: creating writer at 1731457798457 (+423 ms)Flushing 1588230740/table: appending metadata at 1731457798473 (+16 ms)Flushing 1588230740/table: closing flushed file at 1731457798473Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@17513069: reopening flushed file at 1731457798489 (+16 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@56edb607: reopening flushed file at 1731457798496 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3adcc3be: reopening flushed file at 1731457798502 (+6 ms)Finished flush of dataSize ~1.71 KB/1752, heapSize ~3.45 KB/3536, currentSize=0 B/0 for 1588230740 in 591ms, sequenceid=11, compaction requested=false at 1731457798509 (+7 ms)Writing region close event to WAL at 1731457798510 (+1 ms)Running coprocessor post-close hooks at 1731457798514 (+4 ms)Closed at 1731457798514 2024-11-13T00:29:58,514 DEBUG [RS_CLOSE_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-13T00:29:58,517 INFO [RS:0;cb28580903fb:41233 {}] regionserver.HRegionServer(976): stopping server cb28580903fb,41233,1731457759323; all regions closed. 
2024-11-13T00:29:58,517 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:29:58,518 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:29:58,518 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:29:58,518 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:29:58,518 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:29:58,520 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35783 is added to blk_1073741902_1087 (size=825) 2024-11-13T00:29:58,520 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35479 is added to blk_1073741902_1087 (size=825) 2024-11-13T00:29:58,677 INFO [regionserver/cb28580903fb:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-13T00:29:58,678 INFO [regionserver/cb28580903fb:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-13T00:29:58,679 INFO [regionserver/cb28580903fb:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-13T00:29:58,708 INFO [regionserver/cb28580903fb:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-13T00:29:58,708 INFO [regionserver/cb28580903fb:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-13T00:29:59,509 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath 2024-11-13T00:29:59,510 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-13T00:29:59,510 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-13T00:29:59,708 INFO [regionserver/cb28580903fb:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-13T00:30:00,512 INFO [master/cb28580903fb:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-13T00:30:00,512 INFO [master/cb28580903fb:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 
2024-11-13T00:30:01,050 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35479 is added to blk_1073741832_1008 (size=32) 2024-11-13T00:30:01,050 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35479 is added to blk_1073741836_1012 (size=76) 2024-11-13T00:30:01,921 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,35015,1731457760593/cb28580903fb%2C35015%2C1731457760593.1731457760827 after 4001ms 2024-11-13T00:30:01,932 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,41233,1731457759323/cb28580903fb%2C41233%2C1731457759323.meta.1731457760430.meta after 4002ms 2024-11-13T00:30:02,050 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35479 is added to blk_1073741826_1002 (size=42) 2024-11-13T00:30:02,050 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35479 is added to blk_1073741828_1004 (size=1189) 2024-11-13T00:30:02,919 ERROR [WAL-Shutdown-0 {}] wal.AbstractFSWAL(2118): We have waited 5 seconds but the close of async writer doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.fshlog.wait.on.shutdown.seconds" 2024-11-13T00:30:02,928 DEBUG [RS:1;cb28580903fb:35015 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/oldWALs 2024-11-13T00:30:02,928 INFO [RS:1;cb28580903fb:35015 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog cb28580903fb%2C35015%2C1731457760593:(num 1731457760827) 2024-11-13T00:30:02,928 DEBUG [RS:1;cb28580903fb:35015 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T00:30:02,928 INFO [RS:1;cb28580903fb:35015 {}] regionserver.LeaseManager(133): Closed leases 2024-11-13T00:30:02,928 INFO [RS:1;cb28580903fb:35015 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-13T00:30:02,928 INFO [RS:1;cb28580903fb:35015 {}] hbase.ChoreService(370): Chore service for: regionserver/cb28580903fb:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-13T00:30:02,929 INFO [RS:1;cb28580903fb:35015 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-13T00:30:02,929 INFO [RS:1;cb28580903fb:35015 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-13T00:30:02,929 INFO [regionserver/cb28580903fb:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-13T00:30:02,929 INFO [RS:1;cb28580903fb:35015 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-11-13T00:30:02,929 INFO [RS:1;cb28580903fb:35015 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-13T00:30:02,929 INFO [RS:1;cb28580903fb:35015 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:35015 2024-11-13T00:30:02,932 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35015-0x1010ebaa2690002, quorum=127.0.0.1:62272, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/cb28580903fb,35015,1731457760593 2024-11-13T00:30:02,932 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32917-0x1010ebaa2690000, quorum=127.0.0.1:62272, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-13T00:30:02,933 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [cb28580903fb,35015,1731457760593] 2024-11-13T00:30:02,934 INFO [RS:1;cb28580903fb:35015 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-13T00:30:02,935 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/cb28580903fb,35015,1731457760593 already deleted, retry=false 2024-11-13T00:30:02,935 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; cb28580903fb,35015,1731457760593 expired; onlineServers=1 2024-11-13T00:30:03,015 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T00:30:03,027 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T00:30:03,027 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T00:30:03,028 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T00:30:03,028 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T00:30:03,028 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T00:30:03,037 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T00:30:03,037 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T00:30:03,038 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35015-0x1010ebaa2690002, quorum=127.0.0.1:62272, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-13T00:30:03,038 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35015-0x1010ebaa2690002, quorum=127.0.0.1:62272, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-13T00:30:03,038 INFO [RS:1;cb28580903fb:35015 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-13T00:30:03,038 INFO [RS:1;cb28580903fb:35015 {}] regionserver.HRegionServer(1031): Exiting; stopping=cb28580903fb,35015,1731457760593; zookeeper connection closed. 2024-11-13T00:30:03,038 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@c98de2d {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@c98de2d 2024-11-13T00:30:03,518 ERROR [WAL-Shutdown-0 {}] wal.AbstractFSWAL(2118): We have waited 5 seconds but the close of async writer doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.fshlog.wait.on.shutdown.seconds" 2024-11-13T00:30:03,522 DEBUG [RS:0;cb28580903fb:41233 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/oldWALs 2024-11-13T00:30:03,522 INFO [RS:0;cb28580903fb:41233 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog cb28580903fb%2C41233%2C1731457759323.meta:.meta(num 1731457797919) 2024-11-13T00:30:03,523 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:30:03,523 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:30:03,523 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:30:03,523 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:30:03,523 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:30:03,525 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35479 is added to blk_1073741892_1076 (size=18156) 2024-11-13T00:30:03,525 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35783 is added to blk_1073741892_1076 (size=18156) 2024-11-13T00:30:03,528 DEBUG [RS:0;cb28580903fb:41233 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/oldWALs 2024-11-13T00:30:03,528 INFO [RS:0;cb28580903fb:41233 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog cb28580903fb%2C41233%2C1731457759323:(num 1731457797265) 2024-11-13T00:30:03,528 DEBUG [RS:0;cb28580903fb:41233 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T00:30:03,528 INFO [RS:0;cb28580903fb:41233 {}] regionserver.LeaseManager(133): Closed leases 2024-11-13T00:30:03,528 INFO [RS:0;cb28580903fb:41233 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-13T00:30:03,528 INFO [RS:0;cb28580903fb:41233 {}] hbase.ChoreService(370): Chore service for: regionserver/cb28580903fb:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-13T00:30:03,528 INFO [RS:0;cb28580903fb:41233 {}] 
hbase.HBaseServerBase(448): Shutdown executor service 2024-11-13T00:30:03,528 INFO [regionserver/cb28580903fb:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-13T00:30:03,529 INFO [RS:0;cb28580903fb:41233 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:41233 2024-11-13T00:30:03,532 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32917-0x1010ebaa2690000, quorum=127.0.0.1:62272, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-13T00:30:03,532 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41233-0x1010ebaa2690001, quorum=127.0.0.1:62272, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/cb28580903fb,41233,1731457759323 2024-11-13T00:30:03,532 INFO [RS:0;cb28580903fb:41233 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-13T00:30:03,534 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [cb28580903fb,41233,1731457759323] 2024-11-13T00:30:03,535 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/cb28580903fb,41233,1731457759323 already deleted, retry=false 2024-11-13T00:30:03,535 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; cb28580903fb,41233,1731457759323 expired; onlineServers=0 2024-11-13T00:30:03,535 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'cb28580903fb,32917,1731457759225' ***** 2024-11-13T00:30:03,535 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-13T00:30:03,535 INFO [M:0;cb28580903fb:32917 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-13T00:30:03,535 INFO [M:0;cb28580903fb:32917 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-13T00:30:03,535 DEBUG [M:0;cb28580903fb:32917 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-13T00:30:03,536 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-13T00:30:03,536 DEBUG [M:0;cb28580903fb:32917 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-13T00:30:03,536 DEBUG [master/cb28580903fb:0:becomeActiveMaster-HFileCleaner.small.0-1731457759649 {}] cleaner.HFileCleaner(306): Exit Thread[master/cb28580903fb:0:becomeActiveMaster-HFileCleaner.small.0-1731457759649,5,FailOnTimeoutGroup] 2024-11-13T00:30:03,536 DEBUG [master/cb28580903fb:0:becomeActiveMaster-HFileCleaner.large.0-1731457759649 {}] cleaner.HFileCleaner(306): Exit Thread[master/cb28580903fb:0:becomeActiveMaster-HFileCleaner.large.0-1731457759649,5,FailOnTimeoutGroup] 2024-11-13T00:30:03,536 INFO [M:0;cb28580903fb:32917 {}] hbase.ChoreService(370): Chore service for: master/cb28580903fb:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-13T00:30:03,536 INFO [M:0;cb28580903fb:32917 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-13T00:30:03,536 DEBUG [M:0;cb28580903fb:32917 {}] master.HMaster(1795): Stopping service threads 2024-11-13T00:30:03,536 INFO [M:0;cb28580903fb:32917 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-13T00:30:03,536 INFO [M:0;cb28580903fb:32917 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-13T00:30:03,537 INFO [M:0;cb28580903fb:32917 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-13T00:30:03,537 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-13T00:30:03,537 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32917-0x1010ebaa2690000, quorum=127.0.0.1:62272, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-13T00:30:03,537 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32917-0x1010ebaa2690000, quorum=127.0.0.1:62272, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T00:30:03,537 DEBUG [M:0;cb28580903fb:32917 {}] zookeeper.ZKUtil(347): master:32917-0x1010ebaa2690000, quorum=127.0.0.1:62272, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-13T00:30:03,537 WARN [M:0;cb28580903fb:32917 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-13T00:30:03,538 INFO [M:0;cb28580903fb:32917 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/.lastflushedseqids 2024-11-13T00:30:03,541 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-13T00:30:03,550 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35783 is added to blk_1073741914_1100 (size=130) 2024-11-13T00:30:03,551 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35479 is added to blk_1073741914_1100 (size=130) 2024-11-13T00:30:03,553 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T00:30:03,554 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T00:30:03,554 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T00:30:03,554 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T00:30:03,554 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T00:30:03,555 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T00:30:03,558 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T00:30:03,560 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T00:30:03,634 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41233-0x1010ebaa2690001, quorum=127.0.0.1:62272, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-13T00:30:03,634 INFO [RS:0;cb28580903fb:41233 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-13T00:30:03,634 INFO [RS:0;cb28580903fb:41233 {}] regionserver.HRegionServer(1031): Exiting; stopping=cb28580903fb,41233,1731457759323; zookeeper connection closed. 2024-11-13T00:30:03,634 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41233-0x1010ebaa2690001, quorum=127.0.0.1:62272, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-13T00:30:03,634 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@f9fb3ff {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@f9fb3ff 2024-11-13T00:30:03,635 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 2 regionserver(s) complete 2024-11-13T00:30:03,926 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,35015,1731457760593/cb28580903fb%2C35015%2C1731457760593.1731457760827 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:30:03,933 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,41233,1731457759323/cb28580903fb%2C41233%2C1731457759323.meta.1731457760430.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:30:03,952 INFO [M:0;cb28580903fb:32917 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-13T00:30:03,952 INFO [M:0;cb28580903fb:32917 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-13T00:30:03,952 DEBUG [M:0;cb28580903fb:32917 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-13T00:30:03,952 INFO [M:0;cb28580903fb:32917 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-13T00:30:03,952 DEBUG [M:0;cb28580903fb:32917 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-13T00:30:03,952 DEBUG [M:0;cb28580903fb:32917 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-13T00:30:03,952 DEBUG [M:0;cb28580903fb:32917 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-13T00:30:03,953 INFO [M:0;cb28580903fb:32917 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.25 KB heapSize=29.49 KB 2024-11-13T00:30:03,976 DEBUG [M:0;cb28580903fb:32917 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/b8b76f9df77a4787a3ca5f025e321d25 is 82, key is hbase:meta,,1/info:regioninfo/1731457760471/Put/seqid=0 2024-11-13T00:30:03,981 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35783 is added to blk_1073741915_1101 (size=5672) 2024-11-13T00:30:03,982 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35479 is added to blk_1073741915_1101 (size=5672) 2024-11-13T00:30:03,982 INFO [M:0;cb28580903fb:32917 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/b8b76f9df77a4787a3ca5f025e321d25 2024-11-13T00:30:04,005 DEBUG [M:0;cb28580903fb:32917 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/b0cb5b543a4c4d15bbe6bea553345971 is 774, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731457761134/Put/seqid=0 2024-11-13T00:30:04,011 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35479 is added to blk_1073741916_1102 (size=6255) 2024-11-13T00:30:04,011 
INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35783 is added to blk_1073741916_1102 (size=6255) 2024-11-13T00:30:04,013 INFO [M:0;cb28580903fb:32917 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.58 KB at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/b0cb5b543a4c4d15bbe6bea553345971 2024-11-13T00:30:04,019 INFO [M:0;cb28580903fb:32917 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for b0cb5b543a4c4d15bbe6bea553345971 2024-11-13T00:30:04,036 DEBUG [M:0;cb28580903fb:32917 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/45333840373444489075690c18616dba is 69, key is cb28580903fb,35015,1731457760593/rs:state/1731457760655/Put/seqid=0 2024-11-13T00:30:04,044 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35783 is added to blk_1073741917_1103 (size=5224) 2024-11-13T00:30:04,045 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35479 is added to blk_1073741917_1103 (size=5224) 2024-11-13T00:30:04,045 INFO [M:0;cb28580903fb:32917 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=130 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/45333840373444489075690c18616dba 2024-11-13T00:30:04,050 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35479 is added to blk_1073741831_1007 (size=1321) 2024-11-13T00:30:04,054 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35479 is added to blk_1073741835_1011 (size=393) 2024-11-13T00:30:04,070 DEBUG [M:0;cb28580903fb:32917 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/10b71bfc6b0c44ce843479a6b10b68eb is 52, key is load_balancer_on/state:d/1731457760573/Put/seqid=0 2024-11-13T00:30:04,075 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35783 is added to blk_1073741918_1104 (size=5056) 2024-11-13T00:30:04,075 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35479 is added to blk_1073741918_1104 (size=5056) 2024-11-13T00:30:04,076 INFO [M:0;cb28580903fb:32917 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/10b71bfc6b0c44ce843479a6b10b68eb 2024-11-13T00:30:04,083 DEBUG [M:0;cb28580903fb:32917 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/b8b76f9df77a4787a3ca5f025e321d25 as 
hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/b8b76f9df77a4787a3ca5f025e321d25 2024-11-13T00:30:04,090 INFO [M:0;cb28580903fb:32917 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/b8b76f9df77a4787a3ca5f025e321d25, entries=8, sequenceid=60, filesize=5.5 K 2024-11-13T00:30:04,091 DEBUG [M:0;cb28580903fb:32917 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/b0cb5b543a4c4d15bbe6bea553345971 as hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/b0cb5b543a4c4d15bbe6bea553345971 2024-11-13T00:30:04,098 INFO [M:0;cb28580903fb:32917 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for b0cb5b543a4c4d15bbe6bea553345971 2024-11-13T00:30:04,098 INFO [M:0;cb28580903fb:32917 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/b0cb5b543a4c4d15bbe6bea553345971, entries=6, sequenceid=60, filesize=6.1 K 2024-11-13T00:30:04,102 DEBUG [M:0;cb28580903fb:32917 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/45333840373444489075690c18616dba as hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/45333840373444489075690c18616dba 2024-11-13T00:30:04,111 INFO [M:0;cb28580903fb:32917 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/45333840373444489075690c18616dba, entries=2, sequenceid=60, filesize=5.1 K 2024-11-13T00:30:04,112 DEBUG [M:0;cb28580903fb:32917 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/10b71bfc6b0c44ce843479a6b10b68eb as hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/10b71bfc6b0c44ce843479a6b10b68eb 2024-11-13T00:30:04,120 INFO [M:0;cb28580903fb:32917 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/10b71bfc6b0c44ce843479a6b10b68eb, entries=1, sequenceid=60, filesize=4.9 K 2024-11-13T00:30:04,122 INFO [M:0;cb28580903fb:32917 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.25 KB/23805, heapSize ~29.43 KB/30136, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 170ms, sequenceid=60, compaction requested=false 2024-11-13T00:30:04,136 INFO [M:0;cb28580903fb:32917 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-13T00:30:04,136 DEBUG [M:0;cb28580903fb:32917 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731457803952Disabling compacts and flushes for region at 1731457803952Disabling writes for close at 1731457803952Obtaining lock to block concurrent updates at 1731457803953 (+1 ms)Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731457803953Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23805, getHeapSize=30136, getOffHeapSize=0, getCellsCount=71 at 1731457803953Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1731457803954 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731457803954Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731457803975 (+21 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731457803975Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731457803989 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731457804005 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731457804005Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731457804019 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731457804035 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731457804035Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731457804052 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731457804069 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731457804069Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@66d194dd: reopening flushed file at 1731457804082 (+13 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@12d793b4: reopening flushed file at 1731457804090 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@54681546: reopening flushed file at 1731457804098 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2f08dc7e: reopening flushed file at 1731457804111 (+13 ms)Finished flush of dataSize ~23.25 KB/23805, heapSize ~29.43 KB/30136, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 170ms, sequenceid=60, compaction requested=false at 1731457804122 (+11 ms)Writing region close event to WAL at 1731457804136 (+14 ms)Closed at 1731457804136 2024-11-13T00:30:04,137 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:30:04,137 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:30:04,137 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:30:04,137 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:30:04,137 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:30:04,143 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35479 is added to blk_1073741888_1071 (size=1045) 2024-11-13T00:30:04,143 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35783 is added to blk_1073741888_1071 (size=1045) 2024-11-13T00:30:04,927 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for 
hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,35015,1731457760593/cb28580903fb%2C35015%2C1731457760593.1731457760827 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:30:04,934 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,41233,1731457759323/cb28580903fb%2C41233%2C1731457759323.meta.1731457760430.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:30:05,051 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35479 is added to blk_1073741829_1005 (size=34) 2024-11-13T00:30:05,051 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35479 is added to blk_1073741827_1003 (size=196) 2024-11-13T00:30:05,928 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,35015,1731457760593/cb28580903fb%2C35015%2C1731457760593.1731457760827 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:30:05,934 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,41233,1731457759323/cb28580903fb%2C41233%2C1731457759323.meta.1731457760430.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:30:06,929 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,35015,1731457760593/cb28580903fb%2C35015%2C1731457760593.1731457760827 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:30:06,935 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,41233,1731457759323/cb28580903fb%2C41233%2C1731457759323.meta.1731457760430.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:30:07,050 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35479 is added to blk_1073741825_1001 (size=7) 2024-11-13T00:30:07,930 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,35015,1731457760593/cb28580903fb%2C35015%2C1731457760593.1731457760827 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:30:07,935 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,41233,1731457759323/cb28580903fb%2C41233%2C1731457759323.meta.1731457760430.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:30:08,053 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@2e663b2f {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-155329190-172.17.0.2-1731457758228:blk_1073741837_1013, datanode=DatanodeInfoWithStorage[127.0.0.1:39667,null,null]) java.net.ConnectException: Call From cb28580903fb/172.17.0.2 to localhost:33819 failed on connection exception: java.net.ConnectException: Connection refused; For more details see: http://wiki.apache.org/hadoop/ConnectionRefused at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:876) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] 
at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:668) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more 2024-11-13T00:30:08,931 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,35015,1731457760593/cb28580903fb%2C35015%2C1731457760593.1731457760827 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:30:08,936 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,41233,1731457759323/cb28580903fb%2C41233%2C1731457759323.meta.1731457760430.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:30:09,140 ERROR [WAL-Shutdown-0 {}] wal.AbstractFSWAL(2118): We have waited 5 seconds but the close of async writer doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.fshlog.wait.on.shutdown.seconds" 2024-11-13T00:30:09,141 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
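Context for the repeated RecoverLeaseFSUtils warnings and the shutdown ERROR above: the WAL close path keeps probing isFileClosed on a DFS client that has already been closed, so every probe fails with "Filesystem closed", and the async-writer close gives up after the wait named by hbase.wal.fshlog.wait.on.shutdown.seconds. The sketch below is a minimal, hypothetical illustration of such a lease-recovery poll loop and of raising that wait in a test Configuration; the class name LeaseRecoverySketch, the args[0] WAL path, the 10-second setting and the 60-second deadline are assumptions for illustration, not HBase's RecoverLeaseFSUtils implementation.

```java
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class LeaseRecoverySketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Config key taken from the ERROR above; 10 seconds is an arbitrary example value.
    conf.setInt("hbase.wal.fshlog.wait.on.shutdown.seconds", 10);

    // args[0] is assumed to be an hdfs:// WAL path like the ones in the warnings above.
    Path wal = new Path(args[0]);
    FileSystem fs = wal.getFileSystem(conf);
    if (!(fs instanceof DistributedFileSystem)) {
      return; // lease recovery only applies to HDFS
    }
    DistributedFileSystem dfs = (DistributedFileSystem) fs;

    // If the underlying DFSClient has already been closed (as in the stack traces above),
    // both calls below throw IOException("Filesystem closed") and recovery can never succeed.
    boolean recovered = dfs.recoverLease(wal);
    long deadline = System.currentTimeMillis() + 60_000L; // arbitrary overall timeout
    while (!recovered && System.currentTimeMillis() < deadline) {
      Thread.sleep(1000L); // fixed pause between probes
      try {
        recovered = dfs.isFileClosed(wal);
      } catch (IOException e) {
        System.err.println("isFileClosed probe failed: " + e.getMessage());
      }
    }
    System.out.println(recovered ? "Lease recovered for " + wal : "Gave up on " + wal);
  }
}
```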
2024-11-13T00:30:09,141 INFO [M:0;cb28580903fb:32917 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down.
2024-11-13T00:30:09,141 INFO [M:0;cb28580903fb:32917 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:32917
2024-11-13T00:30:09,141 INFO [M:0;cb28580903fb:32917 {}] hbase.HBaseServerBase(479): Close zookeeper
2024-11-13T00:30:09,243 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32917-0x1010ebaa2690000, quorum=127.0.0.1:62272, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-13T00:30:09,243 INFO [M:0;cb28580903fb:32917 {}] hbase.HBaseServerBase(486): Close table descriptors
2024-11-13T00:30:09,243 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32917-0x1010ebaa2690000, quorum=127.0.0.1:62272, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-13T00:30:09,246 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4bb74c79{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-13T00:30:09,247 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@79e13ed6{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-13T00:30:09,247 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-13T00:30:09,247 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@30746b96{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-13T00:30:09,247 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1716e69f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bed78662-d1c4-0e5c-044c-1c6cedef83ec/hadoop.log.dir/,STOPPED}
2024-11-13T00:30:09,249 WARN [BP-155329190-172.17.0.2-1731457758228 heartbeating to localhost/127.0.0.1:34283 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-11-13T00:30:09,249 WARN [BP-155329190-172.17.0.2-1731457758228 heartbeating to localhost/127.0.0.1:34283 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-155329190-172.17.0.2-1731457758228 (Datanode Uuid 15755521-cc20-4ae7-8d60-99b27f2bc8a1) service to localhost/127.0.0.1:34283
2024-11-13T00:30:09,249 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-11-13T00:30:09,249 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@2e663b2f {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-155329190-172.17.0.2-1731457758228:blk_1073741834_1010, datanode=DatanodeInfoWithStorage[127.0.0.1:39667,null,null]) java.io.InterruptedIOException: DestHost:destPort localhost:33819 , LocalHost:localPort cb28580903fb/172.17.0.2:0.
Failed on local exception: java.io.InterruptedIOException: Interrupted: action=RetryAction(action=RETRY, delayMillis=1000, reason=retries get failed due to exceeded maximum allowed retries number: 10), retry policy=RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1000 MILLISECONDS) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:936) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.InterruptedIOException: Interrupted: action=RetryAction(action=RETRY, delayMillis=1000, reason=retries get failed due to exceeded maximum allowed retries number: 10), retry policy=RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1000 MILLISECONDS) at org.apache.hadoop.ipc.Client$Connection.handleConnectionFailure(Client.java:963) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:691) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 
12 more Caused by: java.lang.InterruptedException: sleep interrupted at java.lang.Thread.sleep(Native Method) ~[?:?] at org.apache.hadoop.ipc.Client$Connection.handleConnectionFailure(Client.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:691) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more 2024-11-13T00:30:09,249 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-13T00:30:09,250 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@2e663b2f {}] datanode.BlockRecoveryWorker$1(605): recover Block: RecoveringBlock{BP-155329190-172.17.0.2-1731457758228:blk_1073741834_1010; getBlockSize()=85; corrupt=false; offset=-1; locs=[DatanodeInfoWithStorage[127.0.0.1:35783,null,null], DatanodeInfoWithStorage[127.0.0.1:39667,null,null]]; cachedLocs=[]} FAILED: java.io.IOException: No block pool offer service for bpid=BP-155329190-172.17.0.2-1731457758228 2024-11-13T00:30:09,250 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bed78662-d1c4-0e5c-044c-1c6cedef83ec/cluster_57ab5bb0-53ef-f515-2c82-6c13ee5c6aca/data/data3/current/BP-155329190-172.17.0.2-1731457758228 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-13T00:30:09,250 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bed78662-d1c4-0e5c-044c-1c6cedef83ec/cluster_57ab5bb0-53ef-f515-2c82-6c13ee5c6aca/data/data4/current/BP-155329190-172.17.0.2-1731457758228 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-13T00:30:09,251 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-13T00:30:09,254 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@77de3d88{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-13T00:30:09,254 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@55e6039a{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-13T00:30:09,254 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-13T00:30:09,254 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4cb48605{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-13T00:30:09,255 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2d0c94cc{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bed78662-d1c4-0e5c-044c-1c6cedef83ec/hadoop.log.dir/,STOPPED} 
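A note on the InterruptedIOException above: the IPC client was inside RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1000 MILLISECONDS) when the datanode shutdown interrupted its back-off sleep, which is why the failure surfaces as InterruptedIOException rather than the underlying ConnectException. The following plain-Java sketch only mirrors that retry shape; FixedSleepRetry and its parameters are illustrative assumptions and not Hadoop's org.apache.hadoop.io.retry implementation.

```java
import java.io.IOException;
import java.io.InterruptedIOException;
import java.util.concurrent.Callable;

public final class FixedSleepRetry {

  // Retry 'action' up to maxRetries extra times, sleeping a fixed interval between
  // attempts; an interrupt during the sleep is rethrown as InterruptedIOException,
  // matching the shape of the failure in the log record above.
  static <T> T call(Callable<T> action, int maxRetries, long sleepMillis) throws IOException {
    IOException last = null;
    for (int attempt = 0; attempt <= maxRetries; attempt++) {
      try {
        return action.call();
      } catch (IOException e) {
        last = e; // e.g. java.net.ConnectException: Connection refused
      } catch (Exception e) {
        throw new IOException(e);
      }
      try {
        Thread.sleep(sleepMillis); // fixed back-off between attempts
      } catch (InterruptedException ie) {
        Thread.currentThread().interrupt();
        InterruptedIOException iioe =
            new InterruptedIOException("Interrupted while waiting to retry");
        iioe.initCause(ie);
        throw iioe; // a shutdown interrupt surfaces here
      }
    }
    throw last != null ? last : new IOException("retries exhausted");
  }

  public static void main(String[] args) throws IOException {
    // Usage example: an action that always fails, exercising the retry/sleep path
    // (interrupt the thread during the sleep to see the InterruptedIOException branch).
    Callable<Void> alwaysRefused = () -> {
      throw new java.net.ConnectException("Connection refused");
    };
    call(alwaysRefused, 10, 1000L);
  }
}
```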
2024-11-13T00:30:09,256 WARN [BP-155329190-172.17.0.2-1731457758228 heartbeating to localhost/127.0.0.1:34283 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-11-13T00:30:09,256 WARN [BP-155329190-172.17.0.2-1731457758228 heartbeating to localhost/127.0.0.1:34283 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-155329190-172.17.0.2-1731457758228 (Datanode Uuid 71bcd43b-d601-405d-8eb0-235245acf278) service to localhost/127.0.0.1:34283
2024-11-13T00:30:09,256 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-11-13T00:30:09,256 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-11-13T00:30:09,257 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bed78662-d1c4-0e5c-044c-1c6cedef83ec/cluster_57ab5bb0-53ef-f515-2c82-6c13ee5c6aca/data/data9/current/BP-155329190-172.17.0.2-1731457758228 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-13T00:30:09,257 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bed78662-d1c4-0e5c-044c-1c6cedef83ec/cluster_57ab5bb0-53ef-f515-2c82-6c13ee5c6aca/data/data10/current/BP-155329190-172.17.0.2-1731457758228 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-13T00:30:09,257 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-11-13T00:30:09,265 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@618abfb4{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-11-13T00:30:09,266 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1585275{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-13T00:30:09,266 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-13T00:30:09,266 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7447e09f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-13T00:30:09,266 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3789f604{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bed78662-d1c4-0e5c-044c-1c6cedef83ec/hadoop.log.dir/,STOPPED}
2024-11-13T00:30:09,276 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers
2024-11-13T00:30:09,321 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down
2024-11-13T00:30:09,335 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnDatanodeDeath Thread=157 (was 80) Potentially hanging thread: HMaster-EventLoopGroup-5-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-9-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-18-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Close-WAL-Writer-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:175) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$900/0x00007f53ecbefa48.run(Unknown Source) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:39361 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-7 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-20-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-19-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (599498808) connection to localhost/127.0.0.1:34283 from jenkins.hfs.3 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-21-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.1@localhost:39361 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-19-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-21-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:34283 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-2 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-18-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (599498808) connection to localhost/127.0.0.1:34283 from jenkins.hfs.2 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: LeaseRenewer:jenkins.hfs.2@localhost:34283 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-11 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (599498808) connection to localhost/127.0.0.1:34283 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: LeaseRenewer:jenkins@localhost:34283 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-19-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Timer for 'DataNode' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:34283 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-10 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-6 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Close-WAL-Writer-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:175) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$900/0x00007f53ecbefa48.run(Unknown Source) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-20-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-20-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Close-WAL-Writer-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:175) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$900/0x00007f53ecbefa48.run(Unknown Source) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Command processor java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:34283 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: 
HMaster-EventLoopGroup-5-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-9-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-5 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (599498808) connection to localhost/127.0.0.1:34283 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:34283 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-5-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-8 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-18-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.3@localhost:34283 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-9 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-21-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=440 (was 402) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=211 (was 207) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=1727 (was 1361) - AvailableMemoryMB LEAK? 
- 2024-11-13T00:30:09,345 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnPipelineRestart Thread=157, OpenFileDescriptor=440, MaxFileDescriptor=1048576, SystemLoadAverage=211, ProcessCount=11, AvailableMemoryMB=1727 2024-11-13T00:30:09,345 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-13T00:30:09,345 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bed78662-d1c4-0e5c-044c-1c6cedef83ec/hadoop.log.dir so I do NOT create it in target/test-data/82264a5e-14d0-268b-aec5-074583ae0e1c 2024-11-13T00:30:09,345 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bed78662-d1c4-0e5c-044c-1c6cedef83ec/hadoop.tmp.dir so I do NOT create it in target/test-data/82264a5e-14d0-268b-aec5-074583ae0e1c 2024-11-13T00:30:09,345 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/82264a5e-14d0-268b-aec5-074583ae0e1c/cluster_53488b2f-acaa-1518-ad8c-418752e5a340, deleteOnExit=true 2024-11-13T00:30:09,345 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-13T00:30:09,345 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/82264a5e-14d0-268b-aec5-074583ae0e1c/test.cache.data in system properties and HBase conf 2024-11-13T00:30:09,345 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/82264a5e-14d0-268b-aec5-074583ae0e1c/hadoop.tmp.dir in system properties and HBase conf 2024-11-13T00:30:09,346 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/82264a5e-14d0-268b-aec5-074583ae0e1c/hadoop.log.dir in system properties and HBase conf 2024-11-13T00:30:09,346 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/82264a5e-14d0-268b-aec5-074583ae0e1c/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-13T00:30:09,346 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/82264a5e-14d0-268b-aec5-074583ae0e1c/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-13T00:30:09,346 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-13T00:30:09,346 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-13T00:30:09,346 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/82264a5e-14d0-268b-aec5-074583ae0e1c/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-13T00:30:09,346 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/82264a5e-14d0-268b-aec5-074583ae0e1c/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-13T00:30:09,346 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/82264a5e-14d0-268b-aec5-074583ae0e1c/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-13T00:30:09,346 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/82264a5e-14d0-268b-aec5-074583ae0e1c/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-13T00:30:09,346 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/82264a5e-14d0-268b-aec5-074583ae0e1c/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-13T00:30:09,346 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/82264a5e-14d0-268b-aec5-074583ae0e1c/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-13T00:30:09,346 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/82264a5e-14d0-268b-aec5-074583ae0e1c/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-13T00:30:09,346 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/82264a5e-14d0-268b-aec5-074583ae0e1c/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-13T00:30:09,347 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/82264a5e-14d0-268b-aec5-074583ae0e1c/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-13T00:30:09,347 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/82264a5e-14d0-268b-aec5-074583ae0e1c/nfs.dump.dir in system properties and HBase conf 2024-11-13T00:30:09,347 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/82264a5e-14d0-268b-aec5-074583ae0e1c/java.io.tmpdir in system properties and HBase conf 2024-11-13T00:30:09,347 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/82264a5e-14d0-268b-aec5-074583ae0e1c/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-13T00:30:09,347 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/82264a5e-14d0-268b-aec5-074583ae0e1c/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-13T00:30:09,347 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/82264a5e-14d0-268b-aec5-074583ae0e1c/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-13T00:30:09,362 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-13T00:30:09,447 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-13T00:30:09,455 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-13T00:30:09,460 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-13T00:30:09,461 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-13T00:30:09,461 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-13T00:30:09,461 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-13T00:30:09,462 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@b091339{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/82264a5e-14d0-268b-aec5-074583ae0e1c/hadoop.log.dir/,AVAILABLE} 2024-11-13T00:30:09,462 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@278324d2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-13T00:30:09,509 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-13T00:30:09,593 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@10036843{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/82264a5e-14d0-268b-aec5-074583ae0e1c/java.io.tmpdir/jetty-localhost-33577-hadoop-hdfs-3_4_1-tests_jar-_-any-3618744147122486118/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-13T00:30:09,594 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@7a2900d5{HTTP/1.1, (http/1.1)}{localhost:33577} 2024-11-13T00:30:09,594 INFO [Time-limited test {}] server.Server(415): Started @155442ms 2024-11-13T00:30:09,608 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-13T00:30:09,667 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/MasterData/WALs/cb28580903fb,32917,1731457759225/cb28580903fb%2C32917%2C1731457759225.1731457759520 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:30:09,703 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-13T00:30:09,707 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-13T00:30:09,712 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-13T00:30:09,713 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-13T00:30:09,713 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-13T00:30:09,713 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@76b21b83{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/82264a5e-14d0-268b-aec5-074583ae0e1c/hadoop.log.dir/,AVAILABLE} 2024-11-13T00:30:09,714 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@f2f2a70{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-13T00:30:09,833 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4c5ef86e{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/82264a5e-14d0-268b-aec5-074583ae0e1c/java.io.tmpdir/jetty-localhost-36671-hadoop-hdfs-3_4_1-tests_jar-_-any-4465548583210379907/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-13T00:30:09,833 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5a2f7922{HTTP/1.1, (http/1.1)}{localhost:36671} 2024-11-13T00:30:09,834 INFO [Time-limited test {}] server.Server(415): Started @155682ms 2024-11-13T00:30:09,836 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-13T00:30:09,883 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-13T00:30:09,887 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-13T00:30:09,890 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-13T00:30:09,891 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-13T00:30:09,891 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-13T00:30:09,892 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7e3a0c4e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/82264a5e-14d0-268b-aec5-074583ae0e1c/hadoop.log.dir/,AVAILABLE} 2024-11-13T00:30:09,892 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@41798127{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-13T00:30:09,932 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,35015,1731457760593/cb28580903fb%2C35015%2C1731457760593.1731457760827 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-13T00:30:09,937 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,41233,1731457759323/cb28580903fb%2C41233%2C1731457759323.meta.1731457760430.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:30:09,957 WARN [Thread-1207 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/82264a5e-14d0-268b-aec5-074583ae0e1c/cluster_53488b2f-acaa-1518-ad8c-418752e5a340/data/data1/current/BP-472515239-172.17.0.2-1731457809380/current, will proceed with Du for space computation calculation, 2024-11-13T00:30:09,957 WARN [Thread-1208 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/82264a5e-14d0-268b-aec5-074583ae0e1c/cluster_53488b2f-acaa-1518-ad8c-418752e5a340/data/data2/current/BP-472515239-172.17.0.2-1731457809380/current, will proceed with Du for space computation calculation, 2024-11-13T00:30:09,989 WARN [Thread-1186 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-13T00:30:09,992 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x2c013a340caddce9 with lease ID 0xd633080cf6eef929: Processing first storage report for DS-90d4e24e-d0f9-4947-abd1-b96dac04a6f2 from datanode DatanodeRegistration(127.0.0.1:36659, datanodeUuid=3eab685d-3b03-4ec1-ba04-2c0b82a3601d, infoPort=35761, infoSecurePort=0, ipcPort=43549, storageInfo=lv=-57;cid=testClusterID;nsid=235753840;c=1731457809380) 2024-11-13T00:30:09,992 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x2c013a340caddce9 with lease ID 0xd633080cf6eef929: from storage DS-90d4e24e-d0f9-4947-abd1-b96dac04a6f2 node DatanodeRegistration(127.0.0.1:36659, datanodeUuid=3eab685d-3b03-4ec1-ba04-2c0b82a3601d, infoPort=35761, infoSecurePort=0, ipcPort=43549, storageInfo=lv=-57;cid=testClusterID;nsid=235753840;c=1731457809380), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-13T00:30:09,992 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x2c013a340caddce9 with lease ID 0xd633080cf6eef929: Processing first storage report for DS-03e2dfb5-b531-443c-90c1-b53d6d85bb99 from datanode DatanodeRegistration(127.0.0.1:36659, datanodeUuid=3eab685d-3b03-4ec1-ba04-2c0b82a3601d, infoPort=35761, infoSecurePort=0, ipcPort=43549, storageInfo=lv=-57;cid=testClusterID;nsid=235753840;c=1731457809380) 2024-11-13T00:30:09,992 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x2c013a340caddce9 with lease ID 0xd633080cf6eef929: from storage DS-03e2dfb5-b531-443c-90c1-b53d6d85bb99 node DatanodeRegistration(127.0.0.1:36659, datanodeUuid=3eab685d-3b03-4ec1-ba04-2c0b82a3601d, infoPort=35761, infoSecurePort=0, ipcPort=43549, storageInfo=lv=-57;cid=testClusterID;nsid=235753840;c=1731457809380), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-13T00:30:10,031 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@39cbe06c{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/82264a5e-14d0-268b-aec5-074583ae0e1c/java.io.tmpdir/jetty-localhost-42273-hadoop-hdfs-3_4_1-tests_jar-_-any-6566226690159300093/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-13T00:30:10,031 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@78776860{HTTP/1.1, (http/1.1)}{localhost:42273} 2024-11-13T00:30:10,032 INFO [Time-limited test {}] server.Server(415): Started @155880ms 2024-11-13T00:30:10,034 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-11-13T00:30:10,157 WARN [Thread-1233 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/82264a5e-14d0-268b-aec5-074583ae0e1c/cluster_53488b2f-acaa-1518-ad8c-418752e5a340/data/data3/current/BP-472515239-172.17.0.2-1731457809380/current, will proceed with Du for space computation calculation, 2024-11-13T00:30:10,159 WARN [Thread-1234 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/82264a5e-14d0-268b-aec5-074583ae0e1c/cluster_53488b2f-acaa-1518-ad8c-418752e5a340/data/data4/current/BP-472515239-172.17.0.2-1731457809380/current, will proceed with Du for space computation calculation, 2024-11-13T00:30:10,176 WARN [Thread-1222 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-13T00:30:10,179 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x8f7ca64a0cd5de50 with lease ID 0xd633080cf6eef92a: Processing first storage report for DS-5958543e-56a4-44a7-9bfc-b852d1a631b3 from datanode DatanodeRegistration(127.0.0.1:46725, datanodeUuid=98a15cd0-243c-422e-9014-af5f2d458c5f, infoPort=35309, infoSecurePort=0, ipcPort=37923, storageInfo=lv=-57;cid=testClusterID;nsid=235753840;c=1731457809380) 2024-11-13T00:30:10,179 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x8f7ca64a0cd5de50 with lease ID 0xd633080cf6eef92a: from storage DS-5958543e-56a4-44a7-9bfc-b852d1a631b3 node DatanodeRegistration(127.0.0.1:46725, datanodeUuid=98a15cd0-243c-422e-9014-af5f2d458c5f, infoPort=35309, infoSecurePort=0, ipcPort=37923, storageInfo=lv=-57;cid=testClusterID;nsid=235753840;c=1731457809380), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-13T00:30:10,179 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x8f7ca64a0cd5de50 with lease ID 0xd633080cf6eef92a: Processing first storage report for DS-8f22ec6a-74aa-4ab5-8936-386e88cfa4aa from datanode DatanodeRegistration(127.0.0.1:46725, datanodeUuid=98a15cd0-243c-422e-9014-af5f2d458c5f, infoPort=35309, infoSecurePort=0, ipcPort=37923, storageInfo=lv=-57;cid=testClusterID;nsid=235753840;c=1731457809380) 2024-11-13T00:30:10,179 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x8f7ca64a0cd5de50 with lease ID 0xd633080cf6eef92a: from storage DS-8f22ec6a-74aa-4ab5-8936-386e88cfa4aa node DatanodeRegistration(127.0.0.1:46725, datanodeUuid=98a15cd0-243c-422e-9014-af5f2d458c5f, infoPort=35309, infoSecurePort=0, ipcPort=37923, storageInfo=lv=-57;cid=testClusterID;nsid=235753840;c=1731457809380), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-13T00:30:10,263 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/82264a5e-14d0-268b-aec5-074583ae0e1c 2024-11-13T00:30:10,265 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/82264a5e-14d0-268b-aec5-074583ae0e1c/cluster_53488b2f-acaa-1518-ad8c-418752e5a340/zookeeper_0, clientPort=58313, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/82264a5e-14d0-268b-aec5-074583ae0e1c/cluster_53488b2f-acaa-1518-ad8c-418752e5a340/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/82264a5e-14d0-268b-aec5-074583ae0e1c/cluster_53488b2f-acaa-1518-ad8c-418752e5a340/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-13T00:30:10,266 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=58313 2024-11-13T00:30:10,267 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T00:30:10,268 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T00:30:10,278 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46725 is added to blk_1073741825_1001 (size=7) 2024-11-13T00:30:10,278 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36659 is added to blk_1073741825_1001 (size=7) 2024-11-13T00:30:10,280 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:43143/user/jenkins/test-data/68f23f88-b9bc-298c-0df7-cd8f2616a41b with version=8 2024-11-13T00:30:10,280 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:36657/user/jenkins/test-data/90fad67b-44a6-07b6-9c5c-bf568afb6a9a/hbase-staging 2024-11-13T00:30:10,282 INFO [Time-limited test {}] client.ConnectionUtils(128): master/cb28580903fb:0 server-side Connection retries=45 2024-11-13T00:30:10,282 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-13T00:30:10,283 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-13T00:30:10,283 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-13T00:30:10,283 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-13T00:30:10,283 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-13T00:30:10,283 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-13T00:30:10,283 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-13T00:30:10,284 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:46601 2024-11-13T00:30:10,285 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:46601 connecting to ZooKeeper ensemble=127.0.0.1:58313 2024-11-13T00:30:10,292 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:466010x0, quorum=127.0.0.1:58313, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-13T00:30:10,293 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:46601-0x1010ebb69e80000 connected 2024-11-13T00:30:10,316 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T00:30:10,318 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T00:30:10,320 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:46601-0x1010ebb69e80000, quorum=127.0.0.1:58313, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-13T00:30:10,321 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:43143/user/jenkins/test-data/68f23f88-b9bc-298c-0df7-cd8f2616a41b, hbase.cluster.distributed=false 2024-11-13T00:30:10,323 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:46601-0x1010ebb69e80000, quorum=127.0.0.1:58313, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-13T00:30:10,323 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=46601 2024-11-13T00:30:10,323 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=46601 2024-11-13T00:30:10,324 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=46601 2024-11-13T00:30:10,324 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=46601 2024-11-13T00:30:10,328 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=46601 2024-11-13T00:30:10,344 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/cb28580903fb:0 server-side Connection retries=45 2024-11-13T00:30:10,344 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-13T00:30:10,344 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-13T00:30:10,344 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-13T00:30:10,344 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-13T00:30:10,344 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-13T00:30:10,344 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-13T00:30:10,344 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-13T00:30:10,346 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:42117 2024-11-13T00:30:10,348 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:42117 connecting to ZooKeeper ensemble=127.0.0.1:58313 2024-11-13T00:30:10,349 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T00:30:10,351 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T00:30:10,356 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:421170x0, quorum=127.0.0.1:58313, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-13T00:30:10,357 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:421170x0, quorum=127.0.0.1:58313, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-13T00:30:10,357 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-13T00:30:10,360 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:42117-0x1010ebb69e80001 connected 2024-11-13T00:30:10,363 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-13T00:30:10,363 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:42117-0x1010ebb69e80001, quorum=127.0.0.1:58313, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-13T00:30:10,365 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:42117-0x1010ebb69e80001, quorum=127.0.0.1:58313, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-13T00:30:10,368 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=42117 2024-11-13T00:30:10,368 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=42117 2024-11-13T00:30:10,368 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=42117 2024-11-13T00:30:10,372 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=42117 2024-11-13T00:30:10,373 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=42117 2024-11-13T00:30:10,392 
DEBUG [M:0;cb28580903fb:46601 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;cb28580903fb:46601 2024-11-13T00:30:10,393 INFO [master/cb28580903fb:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/cb28580903fb,46601,1731457810282 2024-11-13T00:30:10,395 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42117-0x1010ebb69e80001, quorum=127.0.0.1:58313, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-13T00:30:10,395 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46601-0x1010ebb69e80000, quorum=127.0.0.1:58313, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-13T00:30:10,395 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:46601-0x1010ebb69e80000, quorum=127.0.0.1:58313, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/cb28580903fb,46601,1731457810282 2024-11-13T00:30:10,397 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42117-0x1010ebb69e80001, quorum=127.0.0.1:58313, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-13T00:30:10,397 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46601-0x1010ebb69e80000, quorum=127.0.0.1:58313, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T00:30:10,397 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42117-0x1010ebb69e80001, quorum=127.0.0.1:58313, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T00:30:10,398 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:46601-0x1010ebb69e80000, quorum=127.0.0.1:58313, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-13T00:30:10,398 INFO [master/cb28580903fb:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/cb28580903fb,46601,1731457810282 from backup master directory 2024-11-13T00:30:10,401 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46601-0x1010ebb69e80000, quorum=127.0.0.1:58313, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/cb28580903fb,46601,1731457810282 2024-11-13T00:30:10,401 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46601-0x1010ebb69e80000, quorum=127.0.0.1:58313, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-13T00:30:10,401 WARN [master/cb28580903fb:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-13T00:30:10,401 INFO [master/cb28580903fb:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=cb28580903fb,46601,1731457810282 2024-11-13T00:30:10,406 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:43143/user/jenkins/test-data/68f23f88-b9bc-298c-0df7-cd8f2616a41b/hbase.id] with ID: 2817235d-2711-427e-ac51-1a211ce538d6 2024-11-13T00:30:10,406 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:43143/user/jenkins/test-data/68f23f88-b9bc-298c-0df7-cd8f2616a41b/.tmp/hbase.id 2024-11-13T00:30:10,412 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42117-0x1010ebb69e80001, quorum=127.0.0.1:58313, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-13T00:30:10,421 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36659 is added to blk_1073741826_1002 (size=42) 2024-11-13T00:30:10,422 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46725 is added to blk_1073741826_1002 (size=42) 2024-11-13T00:30:10,422 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:43143/user/jenkins/test-data/68f23f88-b9bc-298c-0df7-cd8f2616a41b/.tmp/hbase.id]:[hdfs://localhost:43143/user/jenkins/test-data/68f23f88-b9bc-298c-0df7-cd8f2616a41b/hbase.id] 2024-11-13T00:30:10,435 INFO [master/cb28580903fb:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T00:30:10,435 INFO [master/cb28580903fb:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-13T00:30:10,437 INFO [master/cb28580903fb:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 
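The FSUtils(620/625/634) entries above write the cluster ID to a temporary path and then move it to its final location. A minimal sketch of that write-to-temp-then-rename pattern using the standard org.apache.hadoop.fs API; the paths and helper name are illustrative assumptions, not the actual FSUtils code.

// Sketch only: publish a small file atomically by writing to a .tmp path and renaming it.
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ClusterIdPublishSketch {
  static void publishClusterId(FileSystem fs, Path rootDir, String clusterId) throws IOException {
    Path idFile = new Path(rootDir, "hbase.id");                     // target location
    Path tmpFile = new Path(new Path(rootDir, ".tmp"), "hbase.id");  // temporary location

    // 1. Write the content to the temporary path first.
    try (FSDataOutputStream out = fs.create(tmpFile, true)) {
      out.write(clusterId.getBytes(StandardCharsets.UTF_8));
    }
    // 2. Move the temporary file into place; readers see either the complete file
    //    or no file at all, never a half-written one.
    if (!fs.rename(tmpFile, idFile)) {
      throw new IOException("rename " + tmpFile + " -> " + idFile + " failed");
    }
  }
}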
2024-11-13T00:30:10,439 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46601-0x1010ebb69e80000, quorum=127.0.0.1:58313, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T00:30:10,439 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42117-0x1010ebb69e80001, quorum=127.0.0.1:58313, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T00:30:10,455 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36659 is added to blk_1073741827_1003 (size=196) 2024-11-13T00:30:10,456 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46725 is added to blk_1073741827_1003 (size=196) 2024-11-13T00:30:10,459 INFO [master/cb28580903fb:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-13T00:30:10,461 INFO [master/cb28580903fb:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-13T00:30:10,461 INFO [master/cb28580903fb:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-13T00:30:10,476 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46725 is added to blk_1073741828_1004 (size=1189) 2024-11-13T00:30:10,478 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36659 is added to blk_1073741828_1004 (size=1189) 2024-11-13T00:30:10,479 INFO [master/cb28580903fb:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:43143/user/jenkins/test-data/68f23f88-b9bc-298c-0df7-cd8f2616a41b/MasterData/data/master/store 2024-11-13T00:30:10,489 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46725 is added to blk_1073741829_1005 (size=34) 2024-11-13T00:30:10,490 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36659 is added to blk_1073741829_1005 (size=34) 2024-11-13T00:30:10,490 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T00:30:10,490 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-13T00:30:10,490 INFO [master/cb28580903fb:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-13T00:30:10,490 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-13T00:30:10,490 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-13T00:30:10,490 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-13T00:30:10,490 INFO [master/cb28580903fb:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-13T00:30:10,491 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731457810490Disabling compacts and flushes for region at 1731457810490Disabling writes for close at 1731457810490Writing region close event to WAL at 1731457810490Closed at 1731457810490 2024-11-13T00:30:10,492 WARN [master/cb28580903fb:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:43143/user/jenkins/test-data/68f23f88-b9bc-298c-0df7-cd8f2616a41b/MasterData/data/master/store/.initializing 2024-11-13T00:30:10,492 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:43143/user/jenkins/test-data/68f23f88-b9bc-298c-0df7-cd8f2616a41b/MasterData/WALs/cb28580903fb,46601,1731457810282 2024-11-13T00:30:10,495 INFO [master/cb28580903fb:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=cb28580903fb%2C46601%2C1731457810282, suffix=, logDir=hdfs://localhost:43143/user/jenkins/test-data/68f23f88-b9bc-298c-0df7-cd8f2616a41b/MasterData/WALs/cb28580903fb,46601,1731457810282, archiveDir=hdfs://localhost:43143/user/jenkins/test-data/68f23f88-b9bc-298c-0df7-cd8f2616a41b/MasterData/oldWALs, maxLogs=10 2024-11-13T00:30:10,495 INFO [master/cb28580903fb:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor cb28580903fb%2C46601%2C1731457810282.1731457810495 2024-11-13T00:30:10,505 INFO [master/cb28580903fb:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/68f23f88-b9bc-298c-0df7-cd8f2616a41b/MasterData/WALs/cb28580903fb,46601,1731457810282/cb28580903fb%2C46601%2C1731457810282.1731457810495 2024-11-13T00:30:10,508 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35761:35761),(127.0.0.1/127.0.0.1:35309:35309)] 2024-11-13T00:30:10,511 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-13T00:30:10,511 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T00:30:10,511 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-13T00:30:10,511 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-13T00:30:10,513 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-13T00:30:10,515 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-13T00:30:10,515 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T00:30:10,516 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T00:30:10,516 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-13T00:30:10,517 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-13T00:30:10,518 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T00:30:10,518 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T00:30:10,518 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-13T00:30:10,520 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-13T00:30:10,520 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T00:30:10,520 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T00:30:10,520 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-13T00:30:10,521 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-13T00:30:10,522 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T00:30:10,523 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T00:30:10,523 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-13T00:30:10,524 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43143/user/jenkins/test-data/68f23f88-b9bc-298c-0df7-cd8f2616a41b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-13T00:30:10,524 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43143/user/jenkins/test-data/68f23f88-b9bc-298c-0df7-cd8f2616a41b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-13T00:30:10,526 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-13T00:30:10,526 DEBUG [master/cb28580903fb:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-13T00:30:10,527 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-13T00:30:10,528 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-13T00:30:10,530 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43143/user/jenkins/test-data/68f23f88-b9bc-298c-0df7-cd8f2616a41b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-13T00:30:10,531 INFO [master/cb28580903fb:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=755823, jitterRate=-0.038921475410461426}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-13T00:30:10,532 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731457810511Initializing all the Stores at 1731457810512 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731457810513 (+1 ms)Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731457810513Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731457810513Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731457810513Cleaning up temporary data from old regions at 1731457810526 (+13 ms)Region opened successfully at 1731457810532 (+6 ms) 2024-11-13T00:30:10,532 INFO [master/cb28580903fb:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-13T00:30:10,536 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7f5b88a0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=cb28580903fb/172.17.0.2:0 2024-11-13T00:30:10,537 INFO [master/cb28580903fb:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-13T00:30:10,537 INFO [master/cb28580903fb:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-13T00:30:10,537 INFO [master/cb28580903fb:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-13T00:30:10,538 INFO [master/cb28580903fb:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-13T00:30:10,538 INFO [master/cb28580903fb:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-13T00:30:10,539 INFO [master/cb28580903fb:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-13T00:30:10,539 INFO [master/cb28580903fb:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-13T00:30:10,542 INFO [master/cb28580903fb:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-13T00:30:10,543 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46601-0x1010ebb69e80000, quorum=127.0.0.1:58313, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-13T00:30:10,544 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-13T00:30:10,544 INFO [master/cb28580903fb:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-13T00:30:10,545 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46601-0x1010ebb69e80000, quorum=127.0.0.1:58313, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-13T00:30:10,546 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-13T00:30:10,547 INFO [master/cb28580903fb:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-13T00:30:10,548 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46601-0x1010ebb69e80000, quorum=127.0.0.1:58313, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-13T00:30:10,550 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-13T00:30:10,551 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46601-0x1010ebb69e80000, quorum=127.0.0.1:58313, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-13T00:30:10,553 DEBUG 
[master/cb28580903fb:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-13T00:30:10,555 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46601-0x1010ebb69e80000, quorum=127.0.0.1:58313, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-13T00:30:10,556 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-13T00:30:10,559 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42117-0x1010ebb69e80001, quorum=127.0.0.1:58313, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-13T00:30:10,559 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46601-0x1010ebb69e80000, quorum=127.0.0.1:58313, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-13T00:30:10,559 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42117-0x1010ebb69e80001, quorum=127.0.0.1:58313, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T00:30:10,559 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46601-0x1010ebb69e80000, quorum=127.0.0.1:58313, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T00:30:10,561 INFO [master/cb28580903fb:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=cb28580903fb,46601,1731457810282, sessionid=0x1010ebb69e80000, setting cluster-up flag (Was=false) 2024-11-13T00:30:10,566 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46601-0x1010ebb69e80000, quorum=127.0.0.1:58313, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T00:30:10,566 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42117-0x1010ebb69e80001, quorum=127.0.0.1:58313, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T00:30:10,571 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-13T00:30:10,572 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=cb28580903fb,46601,1731457810282 2024-11-13T00:30:10,575 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46601-0x1010ebb69e80000, quorum=127.0.0.1:58313, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T00:30:10,575 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42117-0x1010ebb69e80001, quorum=127.0.0.1:58313, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T00:30:10,581 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-13T00:30:10,582 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=cb28580903fb,46601,1731457810282 2024-11-13T00:30:10,584 INFO [master/cb28580903fb:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:43143/user/jenkins/test-data/68f23f88-b9bc-298c-0df7-cd8f2616a41b/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-13T00:30:10,585 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-13T00:30:10,586 INFO [master/cb28580903fb:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-13T00:30:10,586 INFO [master/cb28580903fb:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-13T00:30:10,586 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: cb28580903fb,46601,1731457810282 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-13T00:30:10,588 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/cb28580903fb:0, corePoolSize=5, maxPoolSize=5 2024-11-13T00:30:10,588 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/cb28580903fb:0, corePoolSize=5, maxPoolSize=5 2024-11-13T00:30:10,588 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/cb28580903fb:0, corePoolSize=5, maxPoolSize=5 2024-11-13T00:30:10,588 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/cb28580903fb:0, corePoolSize=5, maxPoolSize=5 2024-11-13T00:30:10,588 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/cb28580903fb:0, corePoolSize=10, maxPoolSize=10 2024-11-13T00:30:10,588 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/cb28580903fb:0, corePoolSize=1, maxPoolSize=1 2024-11-13T00:30:10,588 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/cb28580903fb:0, corePoolSize=2, maxPoolSize=2 2024-11-13T00:30:10,588 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/cb28580903fb:0, corePoolSize=1, 
maxPoolSize=1 2024-11-13T00:30:10,590 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-13T00:30:10,590 INFO [master/cb28580903fb:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731457840590 2024-11-13T00:30:10,590 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-13T00:30:10,590 INFO [master/cb28580903fb:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-13T00:30:10,590 INFO [master/cb28580903fb:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-13T00:30:10,590 INFO [master/cb28580903fb:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-13T00:30:10,590 INFO [master/cb28580903fb:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-13T00:30:10,590 INFO [master/cb28580903fb:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-13T00:30:10,590 INFO [master/cb28580903fb:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-13T00:30:10,591 INFO [master/cb28580903fb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
2024-11-13T00:30:10,591 INFO [master/cb28580903fb:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-13T00:30:10,591 INFO [master/cb28580903fb:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-13T00:30:10,591 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T00:30:10,591 INFO [master/cb28580903fb:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-13T00:30:10,591 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-13T00:30:10,592 INFO [master/cb28580903fb:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-13T00:30:10,592 INFO [master/cb28580903fb:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-13T00:30:10,593 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/cb28580903fb:0:becomeActiveMaster-HFileCleaner.large.0-1731457810592,5,FailOnTimeoutGroup] 2024-11-13T00:30:10,596 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/cb28580903fb:0:becomeActiveMaster-HFileCleaner.small.0-1731457810593,5,FailOnTimeoutGroup] 2024-11-13T00:30:10,596 INFO [master/cb28580903fb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-13T00:30:10,596 INFO [master/cb28580903fb:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. 
Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-13T00:30:10,596 INFO [master/cb28580903fb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-13T00:30:10,596 INFO [master/cb28580903fb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-13T00:30:10,602 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36659 is added to blk_1073741831_1007 (size=1321) 2024-11-13T00:30:10,602 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46725 is added to blk_1073741831_1007 (size=1321) 2024-11-13T00:30:10,603 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:43143/user/jenkins/test-data/68f23f88-b9bc-298c-0df7-cd8f2616a41b/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-13T00:30:10,604 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:43143/user/jenkins/test-data/68f23f88-b9bc-298c-0df7-cd8f2616a41b 2024-11-13T00:30:10,610 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36659 is added to blk_1073741832_1008 (size=32) 2024-11-13T00:30:10,611 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46725 is added to blk_1073741832_1008 (size=32) 2024-11-13T00:30:10,611 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T00:30:10,612 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created 
cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-13T00:30:10,614 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-13T00:30:10,614 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T00:30:10,614 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T00:30:10,615 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-13T00:30:10,616 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-13T00:30:10,616 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T00:30:10,617 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T00:30:10,617 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-13T00:30:10,618 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, 
maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-13T00:30:10,618 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T00:30:10,619 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T00:30:10,619 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-13T00:30:10,620 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-13T00:30:10,620 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T00:30:10,621 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T00:30:10,621 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-13T00:30:10,621 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43143/user/jenkins/test-data/68f23f88-b9bc-298c-0df7-cd8f2616a41b/data/hbase/meta/1588230740 2024-11-13T00:30:10,622 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43143/user/jenkins/test-data/68f23f88-b9bc-298c-0df7-cd8f2616a41b/data/hbase/meta/1588230740 2024-11-13T00:30:10,623 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-13T00:30:10,623 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-13T00:30:10,624 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using 
region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-13T00:30:10,625 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-13T00:30:10,628 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43143/user/jenkins/test-data/68f23f88-b9bc-298c-0df7-cd8f2616a41b/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-13T00:30:10,629 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=709161, jitterRate=-0.09825587272644043}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-13T00:30:10,629 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731457810611Initializing all the Stores at 1731457810612 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731457810612Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731457810612Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731457810612Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731457810612Cleaning up temporary data from old regions at 1731457810623 (+11 ms)Region opened successfully at 1731457810629 (+6 ms) 2024-11-13T00:30:10,629 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-13T00:30:10,629 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-13T00:30:10,629 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-13T00:30:10,629 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-13T00:30:10,629 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-13T00:30:10,630 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-13T00:30:10,630 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731457810629Disabling compacts and flushes for region at 
1731457810629Disabling writes for close at 1731457810629Writing region close event to WAL at 1731457810630 (+1 ms)Closed at 1731457810630 2024-11-13T00:30:10,631 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-13T00:30:10,631 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-13T00:30:10,632 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-13T00:30:10,633 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-13T00:30:10,635 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-13T00:30:10,668 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/MasterData/WALs/cb28580903fb,32917,1731457759225/cb28580903fb%2C32917%2C1731457759225.1731457759520
java.lang.reflect.InvocationTargetException: null
  at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?]
  at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
  at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
  at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
  at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
  at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
  at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
  at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
  at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
  at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
  at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
  at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
  at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
  at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
  at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
  at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
  at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
  ... 11 more
2024-11-13T00:30:10,678 INFO [RS:0;cb28580903fb:42117 {}] regionserver.HRegionServer(746): ClusterId : 2817235d-2711-427e-ac51-1a211ce538d6 2024-11-13T00:30:10,678 DEBUG [RS:0;cb28580903fb:42117 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-13T00:30:10,680 DEBUG [RS:0;cb28580903fb:42117 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-13T00:30:10,680 DEBUG [RS:0;cb28580903fb:42117 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-13T00:30:10,682 DEBUG [RS:0;cb28580903fb:42117 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-13T00:30:10,683 DEBUG [RS:0;cb28580903fb:42117 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2887615f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=cb28580903fb/172.17.0.2:0 2024-11-13T00:30:10,702 DEBUG [RS:0;cb28580903fb:42117 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;cb28580903fb:42117 2024-11-13T00:30:10,702 INFO [RS:0;cb28580903fb:42117 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-13T00:30:10,702 INFO [RS:0;cb28580903fb:42117 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-13T00:30:10,702 DEBUG [RS:0;cb28580903fb:42117 {}] regionserver.HRegionServer(832): About to register with Master.
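The WARN and stack trace above show RecoverLeaseFSUtils probing DistributedFileSystem#isFileClosed through reflection and the call failing with an InvocationTargetException whose cause is "Filesystem closed" from DFSClient.checkOpen. A minimal sketch of that kind of reflective probe, for illustration only; the class, method body, and error handling here are assumptions, not the actual RecoverLeaseFSUtils code.

// Sketch only: reflective probe of DistributedFileSystem#isFileClosed(Path), with the
// InvocationTargetException unwrap that produces the WARN seen above.
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class IsFileClosedProbeSketch {
  /** Returns true only if the filesystem positively reports the file as closed. */
  static boolean isFileClosed(FileSystem fs, Path path) {
    try {
      // Looked up reflectively so the code also works against FileSystem
      // implementations that do not expose isFileClosed.
      Method m = fs.getClass().getMethod("isFileClosed", Path.class);
      return (Boolean) m.invoke(fs, path);
    } catch (NoSuchMethodException e) {
      return false; // no isFileClosed on this FileSystem implementation
    } catch (InvocationTargetException e) {
      // The wrapped cause is the real failure, e.g. java.io.IOException("Filesystem closed")
      // when the underlying DFSClient has already been shut down.
      System.err.println("isFileClosed probe failed: " + e.getCause());
      return false;
    } catch (IllegalAccessException e) {
      return false;
    }
  }
}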
2024-11-13T00:30:10,703 INFO [RS:0;cb28580903fb:42117 {}] regionserver.HRegionServer(2659): reportForDuty to master=cb28580903fb,46601,1731457810282 with port=42117, startcode=1731457810344 2024-11-13T00:30:10,703 DEBUG [RS:0;cb28580903fb:42117 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-13T00:30:10,706 INFO [HMaster-EventLoopGroup-10-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60907, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.4 (auth:SIMPLE), service=RegionServerStatusService 2024-11-13T00:30:10,706 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46601 {}] master.ServerManager(363): Checking decommissioned status of RegionServer cb28580903fb,42117,1731457810344 2024-11-13T00:30:10,707 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46601 {}] master.ServerManager(517): Registering regionserver=cb28580903fb,42117,1731457810344 2024-11-13T00:30:10,709 DEBUG [RS:0;cb28580903fb:42117 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:43143/user/jenkins/test-data/68f23f88-b9bc-298c-0df7-cd8f2616a41b 2024-11-13T00:30:10,709 DEBUG [RS:0;cb28580903fb:42117 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:43143 2024-11-13T00:30:10,709 DEBUG [RS:0;cb28580903fb:42117 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-13T00:30:10,712 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46601-0x1010ebb69e80000, quorum=127.0.0.1:58313, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-13T00:30:10,712 DEBUG [RS:0;cb28580903fb:42117 {}] zookeeper.ZKUtil(111): regionserver:42117-0x1010ebb69e80001, quorum=127.0.0.1:58313, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/cb28580903fb,42117,1731457810344 2024-11-13T00:30:10,712 WARN [RS:0;cb28580903fb:42117 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-13T00:30:10,712 INFO [RS:0;cb28580903fb:42117 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-13T00:30:10,712 DEBUG [RS:0;cb28580903fb:42117 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:43143/user/jenkins/test-data/68f23f88-b9bc-298c-0df7-cd8f2616a41b/WALs/cb28580903fb,42117,1731457810344 2024-11-13T00:30:10,713 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [cb28580903fb,42117,1731457810344] 2024-11-13T00:30:10,717 INFO [RS:0;cb28580903fb:42117 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-13T00:30:10,718 INFO [RS:0;cb28580903fb:42117 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-13T00:30:10,718 INFO [RS:0;cb28580903fb:42117 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-13T00:30:10,718 INFO [RS:0;cb28580903fb:42117 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-11-13T00:30:10,719 INFO [RS:0;cb28580903fb:42117 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-13T00:30:10,720 INFO [RS:0;cb28580903fb:42117 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-13T00:30:10,720 INFO [RS:0;cb28580903fb:42117 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-13T00:30:10,720 DEBUG [RS:0;cb28580903fb:42117 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/cb28580903fb:0, corePoolSize=1, maxPoolSize=1 2024-11-13T00:30:10,720 DEBUG [RS:0;cb28580903fb:42117 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/cb28580903fb:0, corePoolSize=1, maxPoolSize=1 2024-11-13T00:30:10,720 DEBUG [RS:0;cb28580903fb:42117 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/cb28580903fb:0, corePoolSize=1, maxPoolSize=1 2024-11-13T00:30:10,720 DEBUG [RS:0;cb28580903fb:42117 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/cb28580903fb:0, corePoolSize=1, maxPoolSize=1 2024-11-13T00:30:10,720 DEBUG [RS:0;cb28580903fb:42117 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/cb28580903fb:0, corePoolSize=1, maxPoolSize=1 2024-11-13T00:30:10,720 DEBUG [RS:0;cb28580903fb:42117 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/cb28580903fb:0, corePoolSize=2, maxPoolSize=2 2024-11-13T00:30:10,720 DEBUG [RS:0;cb28580903fb:42117 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/cb28580903fb:0, corePoolSize=1, maxPoolSize=1 2024-11-13T00:30:10,720 DEBUG [RS:0;cb28580903fb:42117 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/cb28580903fb:0, corePoolSize=1, maxPoolSize=1 2024-11-13T00:30:10,720 DEBUG [RS:0;cb28580903fb:42117 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/cb28580903fb:0, corePoolSize=1, maxPoolSize=1 2024-11-13T00:30:10,721 DEBUG [RS:0;cb28580903fb:42117 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/cb28580903fb:0, corePoolSize=1, maxPoolSize=1 2024-11-13T00:30:10,721 DEBUG [RS:0;cb28580903fb:42117 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/cb28580903fb:0, corePoolSize=1, maxPoolSize=1 2024-11-13T00:30:10,721 DEBUG [RS:0;cb28580903fb:42117 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/cb28580903fb:0, corePoolSize=1, maxPoolSize=1 2024-11-13T00:30:10,721 DEBUG [RS:0;cb28580903fb:42117 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/cb28580903fb:0, corePoolSize=3, maxPoolSize=3 2024-11-13T00:30:10,721 DEBUG [RS:0;cb28580903fb:42117 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/cb28580903fb:0, corePoolSize=3, maxPoolSize=3 2024-11-13T00:30:10,723 INFO [RS:0;cb28580903fb:42117 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-11-13T00:30:10,723 INFO [RS:0;cb28580903fb:42117 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-13T00:30:10,723 INFO [RS:0;cb28580903fb:42117 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-13T00:30:10,723 INFO [RS:0;cb28580903fb:42117 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-13T00:30:10,723 INFO [RS:0;cb28580903fb:42117 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-13T00:30:10,723 INFO [RS:0;cb28580903fb:42117 {}] hbase.ChoreService(168): Chore ScheduledChore name=cb28580903fb,42117,1731457810344-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-13T00:30:10,740 INFO [RS:0;cb28580903fb:42117 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-13T00:30:10,740 INFO [RS:0;cb28580903fb:42117 {}] hbase.ChoreService(168): Chore ScheduledChore name=cb28580903fb,42117,1731457810344-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-13T00:30:10,740 INFO [RS:0;cb28580903fb:42117 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-13T00:30:10,741 INFO [RS:0;cb28580903fb:42117 {}] regionserver.Replication(171): cb28580903fb,42117,1731457810344 started 2024-11-13T00:30:10,759 INFO [RS:0;cb28580903fb:42117 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-13T00:30:10,759 INFO [RS:0;cb28580903fb:42117 {}] regionserver.HRegionServer(1482): Serving as cb28580903fb,42117,1731457810344, RpcServer on cb28580903fb/172.17.0.2:42117, sessionid=0x1010ebb69e80001 2024-11-13T00:30:10,759 DEBUG [RS:0;cb28580903fb:42117 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-13T00:30:10,759 DEBUG [RS:0;cb28580903fb:42117 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager cb28580903fb,42117,1731457810344 2024-11-13T00:30:10,759 DEBUG [RS:0;cb28580903fb:42117 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'cb28580903fb,42117,1731457810344' 2024-11-13T00:30:10,759 DEBUG [RS:0;cb28580903fb:42117 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-13T00:30:10,760 DEBUG [RS:0;cb28580903fb:42117 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-13T00:30:10,761 DEBUG [RS:0;cb28580903fb:42117 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-13T00:30:10,761 DEBUG [RS:0;cb28580903fb:42117 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-13T00:30:10,761 DEBUG [RS:0;cb28580903fb:42117 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager cb28580903fb,42117,1731457810344 2024-11-13T00:30:10,761 DEBUG [RS:0;cb28580903fb:42117 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'cb28580903fb,42117,1731457810344' 2024-11-13T00:30:10,761 DEBUG [RS:0;cb28580903fb:42117 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-13T00:30:10,761 DEBUG 
[RS:0;cb28580903fb:42117 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired'
2024-11-13T00:30:10,761 DEBUG [RS:0;cb28580903fb:42117 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started
2024-11-13T00:30:10,761 INFO [RS:0;cb28580903fb:42117 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled
2024-11-13T00:30:10,761 INFO [RS:0;cb28580903fb:42117 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager.
2024-11-13T00:30:10,785 WARN [cb28580903fb:46601 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions.
2024-11-13T00:30:10,864 INFO [RS:0;cb28580903fb:42117 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=cb28580903fb%2C42117%2C1731457810344, suffix=, logDir=hdfs://localhost:43143/user/jenkins/test-data/68f23f88-b9bc-298c-0df7-cd8f2616a41b/WALs/cb28580903fb,42117,1731457810344, archiveDir=hdfs://localhost:43143/user/jenkins/test-data/68f23f88-b9bc-298c-0df7-cd8f2616a41b/oldWALs, maxLogs=32
2024-11-13T00:30:10,865 INFO [RS:0;cb28580903fb:42117 {}] monitor.StreamSlowMonitor(122): New stream slow monitor cb28580903fb%2C42117%2C1731457810344.1731457810865
2024-11-13T00:30:10,873 INFO [RS:0;cb28580903fb:42117 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/68f23f88-b9bc-298c-0df7-cd8f2616a41b/WALs/cb28580903fb,42117,1731457810344/cb28580903fb%2C42117%2C1731457810344.1731457810865
2024-11-13T00:30:10,881 DEBUG [RS:0;cb28580903fb:42117 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35309:35309),(127.0.0.1/127.0.0.1:35761:35761)]
2024-11-13T00:30:10,932 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,35015,1731457760593/cb28580903fb%2C35015%2C1731457760593.1731457760827
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-11-13T00:30:10,937 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,41233,1731457759323/cb28580903fb%2C41233%2C1731457759323.meta.1731457760430.meta
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-11-13T00:30:11,035 DEBUG [cb28580903fb:46601 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1
2024-11-13T00:30:11,036 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=cb28580903fb,42117,1731457810344
2024-11-13T00:30:11,038 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as cb28580903fb,42117,1731457810344, state=OPENING
2024-11-13T00:30:11,040 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it
2024-11-13T00:30:11,042 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46601-0x1010ebb69e80000, quorum=127.0.0.1:58313, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-13T00:30:11,042 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42117-0x1010ebb69e80001, quorum=127.0.0.1:58313, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-13T00:30:11,043 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN
2024-11-13T00:30:11,043 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED
2024-11-13T00:30:11,043 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED
2024-11-13T00:30:11,043 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=cb28580903fb,42117,1731457810344}]
2024-11-13T00:30:11,197 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false
2024-11-13T00:30:11,200 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47425, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService
2024-11-13T00:30:11,209 INFO [RS_OPEN_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740
2024-11-13T00:30:11,209 INFO [RS_OPEN_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider
2024-11-13T00:30:11,211 INFO [RS_OPEN_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=cb28580903fb%2C42117%2C1731457810344.meta, suffix=.meta, logDir=hdfs://localhost:43143/user/jenkins/test-data/68f23f88-b9bc-298c-0df7-cd8f2616a41b/WALs/cb28580903fb,42117,1731457810344, archiveDir=hdfs://localhost:43143/user/jenkins/test-data/68f23f88-b9bc-298c-0df7-cd8f2616a41b/oldWALs, maxLogs=32
2024-11-13T00:30:11,212 INFO [RS_OPEN_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor cb28580903fb%2C42117%2C1731457810344.meta.1731457811212.meta
2024-11-13T00:30:11,228 INFO
[RS_OPEN_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/68f23f88-b9bc-298c-0df7-cd8f2616a41b/WALs/cb28580903fb,42117,1731457810344/cb28580903fb%2C42117%2C1731457810344.meta.1731457811212.meta 2024-11-13T00:30:11,240 DEBUG [RS_OPEN_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35309:35309),(127.0.0.1/127.0.0.1:35761:35761)] 2024-11-13T00:30:11,272 DEBUG [RS_OPEN_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-13T00:30:11,273 DEBUG [RS_OPEN_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-13T00:30:11,273 DEBUG [RS_OPEN_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-13T00:30:11,273 INFO [RS_OPEN_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-11-13T00:30:11,273 DEBUG [RS_OPEN_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-13T00:30:11,273 DEBUG [RS_OPEN_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T00:30:11,273 DEBUG [RS_OPEN_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-13T00:30:11,274 DEBUG [RS_OPEN_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-13T00:30:11,284 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-13T00:30:11,286 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-13T00:30:11,286 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T00:30:11,286 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T00:30:11,287 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-13T00:30:11,289 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-13T00:30:11,289 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T00:30:11,290 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T00:30:11,291 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-13T00:30:11,292 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-13T00:30:11,292 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T00:30:11,292 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T00:30:11,292 INFO [StoreOpener-1588230740-1 {}] 
regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-13T00:30:11,294 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-13T00:30:11,294 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T00:30:11,294 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T00:30:11,294 DEBUG [RS_OPEN_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-13T00:30:11,295 DEBUG [RS_OPEN_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43143/user/jenkins/test-data/68f23f88-b9bc-298c-0df7-cd8f2616a41b/data/hbase/meta/1588230740 2024-11-13T00:30:11,297 DEBUG [RS_OPEN_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43143/user/jenkins/test-data/68f23f88-b9bc-298c-0df7-cd8f2616a41b/data/hbase/meta/1588230740 2024-11-13T00:30:11,299 DEBUG [RS_OPEN_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-13T00:30:11,299 DEBUG [RS_OPEN_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-13T00:30:11,300 DEBUG [RS_OPEN_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-11-13T00:30:11,303 DEBUG [RS_OPEN_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-13T00:30:11,304 INFO [RS_OPEN_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=725452, jitterRate=-0.07754029333591461}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-13T00:30:11,304 DEBUG [RS_OPEN_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-13T00:30:11,305 DEBUG [RS_OPEN_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731457811274Writing region info on filesystem at 1731457811274Initializing all the Stores at 1731457811281 (+7 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731457811281Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731457811284 (+3 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731457811284Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731457811284Cleaning up temporary data from old regions at 1731457811299 (+15 ms)Running coprocessor post-open hooks at 1731457811304 (+5 ms)Region opened successfully at 1731457811305 (+1 ms) 2024-11-13T00:30:11,306 INFO [RS_OPEN_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731457811197 2024-11-13T00:30:11,309 DEBUG [RS_OPEN_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-13T00:30:11,309 INFO [RS_OPEN_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-13T00:30:11,310 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, 
openSeqNum=2, regionLocation=cb28580903fb,42117,1731457810344 2024-11-13T00:30:11,311 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as cb28580903fb,42117,1731457810344, state=OPEN 2024-11-13T00:30:11,316 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42117-0x1010ebb69e80001, quorum=127.0.0.1:58313, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-13T00:30:11,316 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46601-0x1010ebb69e80000, quorum=127.0.0.1:58313, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-13T00:30:11,316 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-13T00:30:11,316 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=cb28580903fb,42117,1731457810344 2024-11-13T00:30:11,317 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-13T00:30:11,320 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-13T00:30:11,320 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=cb28580903fb,42117,1731457810344 in 273 msec 2024-11-13T00:30:11,323 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-13T00:30:11,323 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 689 msec 2024-11-13T00:30:11,324 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-13T00:30:11,324 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-13T00:30:11,325 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-13T00:30:11,325 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=cb28580903fb,42117,1731457810344, seqNum=-1] 2024-11-13T00:30:11,326 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-13T00:30:11,327 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53861, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-13T00:30:11,332 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 746 msec 2024-11-13T00:30:11,332 INFO [master/cb28580903fb:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731457811332, completionTime=-1 2024-11-13T00:30:11,332 INFO 
[master/cb28580903fb:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-13T00:30:11,332 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-13T00:30:11,334 INFO [master/cb28580903fb:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-13T00:30:11,334 INFO [master/cb28580903fb:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731457871334 2024-11-13T00:30:11,334 INFO [master/cb28580903fb:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731457931334 2024-11-13T00:30:11,334 INFO [master/cb28580903fb:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 1 msec 2024-11-13T00:30:11,335 INFO [master/cb28580903fb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=cb28580903fb,46601,1731457810282-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-13T00:30:11,335 INFO [master/cb28580903fb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=cb28580903fb,46601,1731457810282-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-13T00:30:11,335 INFO [master/cb28580903fb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=cb28580903fb,46601,1731457810282-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-13T00:30:11,335 INFO [master/cb28580903fb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-cb28580903fb:46601, period=300000, unit=MILLISECONDS is enabled. 2024-11-13T00:30:11,335 INFO [master/cb28580903fb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-13T00:30:11,335 INFO [master/cb28580903fb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-13T00:30:11,336 DEBUG [master/cb28580903fb:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-13T00:30:11,339 INFO [master/cb28580903fb:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.938sec 2024-11-13T00:30:11,339 INFO [master/cb28580903fb:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-13T00:30:11,339 INFO [master/cb28580903fb:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-13T00:30:11,339 INFO [master/cb28580903fb:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-13T00:30:11,339 INFO [master/cb28580903fb:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
2024-11-13T00:30:11,339 INFO [master/cb28580903fb:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-13T00:30:11,339 INFO [master/cb28580903fb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=cb28580903fb,46601,1731457810282-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-13T00:30:11,339 INFO [master/cb28580903fb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=cb28580903fb,46601,1731457810282-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-13T00:30:11,341 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-13T00:30:11,341 INFO [master/cb28580903fb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-13T00:30:11,341 INFO [master/cb28580903fb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=cb28580903fb,46601,1731457810282-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-13T00:30:11,378 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5a0aea16, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T00:30:11,378 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request cb28580903fb,46601,-1 for getting cluster id 2024-11-13T00:30:11,379 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-13T00:30:11,380 DEBUG [HMaster-EventLoopGroup-10-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '2817235d-2711-427e-ac51-1a211ce538d6' 2024-11-13T00:30:11,381 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-13T00:30:11,381 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "2817235d-2711-427e-ac51-1a211ce538d6" 2024-11-13T00:30:11,381 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3a422eef, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T00:30:11,381 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [cb28580903fb,46601,-1] 2024-11-13T00:30:11,381 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-13T00:30:11,382 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T00:30:11,383 INFO [HMaster-EventLoopGroup-10-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53124, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-13T00:30:11,384 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2e43587e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, 
writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T00:30:11,385 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-13T00:30:11,386 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=cb28580903fb,42117,1731457810344, seqNum=-1] 2024-11-13T00:30:11,386 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-13T00:30:11,388 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49202, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-13T00:30:11,389 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=cb28580903fb,46601,1731457810282 2024-11-13T00:30:11,389 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T00:30:11,392 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-13T00:30:11,392 INFO [Time-limited test {}] wal.TestLogRolling(320): Starting testLogRollOnPipelineRestart 2024-11-13T00:30:11,392 INFO [Time-limited test {}] wal.TestLogRolling(323): Replication=2 2024-11-13T00:30:11,392 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-13T00:30:11,393 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.AsyncConnectionImpl(321): The fetched master address is cb28580903fb,46601,1731457810282 2024-11-13T00:30:11,393 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@45aa9693 2024-11-13T00:30:11,393 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-13T00:30:11,395 INFO [HMaster-EventLoopGroup-10-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53134, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-13T00:30:11,395 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46601 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-13T00:30:11,395 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46601 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
2024-11-13T00:30:11,395 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46601 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRollOnPipelineRestart', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-13T00:30:11,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46601 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart 2024-11-13T00:30:11,398 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_PRE_OPERATION 2024-11-13T00:30:11,398 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T00:30:11,398 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46601 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRollOnPipelineRestart" procId is: 4 2024-11-13T00:30:11,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46601 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-13T00:30:11,399 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-13T00:30:11,407 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46725 is added to blk_1073741835_1011 (size=395) 2024-11-13T00:30:11,407 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36659 is added to blk_1073741835_1011 (size=395) 2024-11-13T00:30:11,409 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 68dec2c8fdcb5fec0c289698d8880a8a, NAME => 'TestLogRolling-testLogRollOnPipelineRestart,,1731457811395.68dec2c8fdcb5fec0c289698d8880a8a.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRollOnPipelineRestart', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:43143/user/jenkins/test-data/68f23f88-b9bc-298c-0df7-cd8f2616a41b 2024-11-13T00:30:11,423 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46725 is added to blk_1073741836_1012 (size=78) 2024-11-13T00:30:11,424 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36659 is added to blk_1073741836_1012 (size=78) 2024-11-13T00:30:11,424 DEBUG 
[RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnPipelineRestart,,1731457811395.68dec2c8fdcb5fec0c289698d8880a8a.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T00:30:11,425 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1722): Closing 68dec2c8fdcb5fec0c289698d8880a8a, disabling compactions & flushes 2024-11-13T00:30:11,425 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnPipelineRestart,,1731457811395.68dec2c8fdcb5fec0c289698d8880a8a. 2024-11-13T00:30:11,425 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnPipelineRestart,,1731457811395.68dec2c8fdcb5fec0c289698d8880a8a. 2024-11-13T00:30:11,425 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnPipelineRestart,,1731457811395.68dec2c8fdcb5fec0c289698d8880a8a. after waiting 0 ms 2024-11-13T00:30:11,425 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnPipelineRestart,,1731457811395.68dec2c8fdcb5fec0c289698d8880a8a. 2024-11-13T00:30:11,425 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnPipelineRestart,,1731457811395.68dec2c8fdcb5fec0c289698d8880a8a. 2024-11-13T00:30:11,425 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1676): Region close journal for 68dec2c8fdcb5fec0c289698d8880a8a: Waiting for close lock at 1731457811425Disabling compacts and flushes for region at 1731457811425Disabling writes for close at 1731457811425Writing region close event to WAL at 1731457811425Closed at 1731457811425 2024-11-13T00:30:11,427 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_ADD_TO_META 2024-11-13T00:30:11,427 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRollOnPipelineRestart,,1731457811395.68dec2c8fdcb5fec0c289698d8880a8a.","families":{"info":[{"qualifier":"regioninfo","vlen":77,"tag":[],"timestamp":"1731457811427"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731457811427"}]},"ts":"1731457811427"} 2024-11-13T00:30:11,431 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
2024-11-13T00:30:11,433 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_ASSIGN_REGIONS
2024-11-13T00:30:11,433 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnPipelineRestart","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731457811433"}]},"ts":"1731457811433"}
2024-11-13T00:30:11,436 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnPipelineRestart, state=ENABLING in hbase:meta
2024-11-13T00:30:11,436 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=68dec2c8fdcb5fec0c289698d8880a8a, ASSIGN}]
2024-11-13T00:30:11,438 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=68dec2c8fdcb5fec0c289698d8880a8a, ASSIGN
2024-11-13T00:30:11,439 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=68dec2c8fdcb5fec0c289698d8880a8a, ASSIGN; state=OFFLINE, location=cb28580903fb,42117,1731457810344; forceNewPlan=false, retain=false
2024-11-13T00:30:11,590 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=68dec2c8fdcb5fec0c289698d8880a8a, regionState=OPENING, regionLocation=cb28580903fb,42117,1731457810344
2024-11-13T00:30:11,593 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-11-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=68dec2c8fdcb5fec0c289698d8880a8a, ASSIGN because future has completed
2024-11-13T00:30:11,594 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 68dec2c8fdcb5fec0c289698d8880a8a, server=cb28580903fb,42117,1731457810344}]
2024-11-13T00:30:11,669 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/MasterData/WALs/cb28580903fb,32917,1731457759225/cb28580903fb%2C32917%2C1731457759225.1731457759520
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-11-13T00:30:11,751 INFO [RS_OPEN_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRollOnPipelineRestart,,1731457811395.68dec2c8fdcb5fec0c289698d8880a8a.
2024-11-13T00:30:11,751 DEBUG [RS_OPEN_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 68dec2c8fdcb5fec0c289698d8880a8a, NAME => 'TestLogRolling-testLogRollOnPipelineRestart,,1731457811395.68dec2c8fdcb5fec0c289698d8880a8a.', STARTKEY => '', ENDKEY => ''}
2024-11-13T00:30:11,751 DEBUG [RS_OPEN_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRollOnPipelineRestart 68dec2c8fdcb5fec0c289698d8880a8a
2024-11-13T00:30:11,752 DEBUG [RS_OPEN_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnPipelineRestart,,1731457811395.68dec2c8fdcb5fec0c289698d8880a8a.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-11-13T00:30:11,752 DEBUG [RS_OPEN_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 68dec2c8fdcb5fec0c289698d8880a8a
2024-11-13T00:30:11,752 DEBUG [RS_OPEN_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 68dec2c8fdcb5fec0c289698d8880a8a
2024-11-13T00:30:11,753 INFO [StoreOpener-68dec2c8fdcb5fec0c289698d8880a8a-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 68dec2c8fdcb5fec0c289698d8880a8a
2024-11-13T00:30:11,755 INFO [StoreOpener-68dec2c8fdcb5fec0c289698d8880a8a-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 68dec2c8fdcb5fec0c289698d8880a8a columnFamilyName info
2024-11-13T00:30:11,755 DEBUG [StoreOpener-68dec2c8fdcb5fec0c289698d8880a8a-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-13T00:30:11,755 INFO [StoreOpener-68dec2c8fdcb5fec0c289698d8880a8a-1 {}] regionserver.HStore(327): Store=68dec2c8fdcb5fec0c289698d8880a8a/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-11-13T00:30:11,755 DEBUG [RS_OPEN_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 68dec2c8fdcb5fec0c289698d8880a8a
2024-11-13T00:30:11,756 DEBUG [RS_OPEN_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43143/user/jenkins/test-data/68f23f88-b9bc-298c-0df7-cd8f2616a41b/data/default/TestLogRolling-testLogRollOnPipelineRestart/68dec2c8fdcb5fec0c289698d8880a8a
2024-11-13T00:30:11,756 DEBUG [RS_OPEN_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43143/user/jenkins/test-data/68f23f88-b9bc-298c-0df7-cd8f2616a41b/data/default/TestLogRolling-testLogRollOnPipelineRestart/68dec2c8fdcb5fec0c289698d8880a8a
2024-11-13T00:30:11,757 DEBUG [RS_OPEN_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 68dec2c8fdcb5fec0c289698d8880a8a
2024-11-13T00:30:11,757 DEBUG [RS_OPEN_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 68dec2c8fdcb5fec0c289698d8880a8a
2024-11-13T00:30:11,758 DEBUG [RS_OPEN_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 68dec2c8fdcb5fec0c289698d8880a8a
2024-11-13T00:30:11,760 DEBUG [RS_OPEN_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43143/user/jenkins/test-data/68f23f88-b9bc-298c-0df7-cd8f2616a41b/data/default/TestLogRolling-testLogRollOnPipelineRestart/68dec2c8fdcb5fec0c289698d8880a8a/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1
2024-11-13T00:30:11,761 INFO [RS_OPEN_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 68dec2c8fdcb5fec0c289698d8880a8a; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=829357, jitterRate=0.05458320677280426}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1}
2024-11-13T00:30:11,761 DEBUG [RS_OPEN_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 68dec2c8fdcb5fec0c289698d8880a8a
2024-11-13T00:30:11,761 DEBUG [RS_OPEN_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 68dec2c8fdcb5fec0c289698d8880a8a: Running coprocessor pre-open hook at 1731457811752Writing region info on filesystem at 1731457811752Initializing all the Stores at 1731457811753 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731457811753Cleaning up temporary data from old regions at 1731457811757 (+4 ms)Running coprocessor post-open hooks at 1731457811761 (+4 ms)Region opened successfully at 1731457811761
2024-11-13T00:30:11,762 INFO [RS_OPEN_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRollOnPipelineRestart,,1731457811395.68dec2c8fdcb5fec0c289698d8880a8a., pid=6, masterSystemTime=1731457811747
2024-11-13T00:30:11,765 DEBUG [RS_OPEN_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRollOnPipelineRestart,,1731457811395.68dec2c8fdcb5fec0c289698d8880a8a.
2024-11-13T00:30:11,765 INFO [RS_OPEN_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRollOnPipelineRestart,,1731457811395.68dec2c8fdcb5fec0c289698d8880a8a.
2024-11-13T00:30:11,766 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=68dec2c8fdcb5fec0c289698d8880a8a, regionState=OPEN, openSeqNum=2, regionLocation=cb28580903fb,42117,1731457810344
2024-11-13T00:30:11,768 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-11-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 68dec2c8fdcb5fec0c289698d8880a8a, server=cb28580903fb,42117,1731457810344 because future has completed
2024-11-13T00:30:11,772 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5
2024-11-13T00:30:11,772 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 68dec2c8fdcb5fec0c289698d8880a8a, server=cb28580903fb,42117,1731457810344 in 175 msec
2024-11-13T00:30:11,775 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4
2024-11-13T00:30:11,775 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=68dec2c8fdcb5fec0c289698d8880a8a, ASSIGN in 336 msec
2024-11-13T00:30:11,776 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_UPDATE_DESC_CACHE
2024-11-13T00:30:11,776 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnPipelineRestart","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731457811776"}]},"ts":"1731457811776"}
2024-11-13T00:30:11,778 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnPipelineRestart, state=ENABLED in hbase:meta
2024-11-13T00:30:11,779 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_POST_OPERATION
2024-11-13T00:30:11,781 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart in 384 msec
2024-11-13T00:30:11,933 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,35015,1731457760593/cb28580903fb%2C35015%2C1731457760593.1731457760827
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-13T00:30:11,938 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,41233,1731457759323/cb28580903fb%2C41233%2C1731457759323.meta.1731457760430.meta java.lang.reflect.InvocationTargetException: null
2024-11-13T00:30:12,669 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/MasterData/WALs/cb28580903fb,32917,1731457759225/cb28580903fb%2C32917%2C1731457759225.1731457759520 java.lang.reflect.InvocationTargetException: null
2024-11-13T00:30:12,933 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,35015,1731457760593/cb28580903fb%2C35015%2C1731457760593.1731457760827 java.lang.reflect.InvocationTargetException: null
2024-11-13T00:30:12,939 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,41233,1731457759323/cb28580903fb%2C41233%2C1731457759323.meta.1731457760430.meta java.lang.reflect.InvocationTargetException: null
2024-11-13T00:30:13,670 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/MasterData/WALs/cb28580903fb,32917,1731457759225/cb28580903fb%2C32917%2C1731457759225.1731457759520 java.lang.reflect.InvocationTargetException: null
2024-11-13T00:30:13,934 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,35015,1731457760593/cb28580903fb%2C35015%2C1731457760593.1731457760827 java.lang.reflect.InvocationTargetException: null
2024-11-13T00:30:13,939 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,41233,1731457759323/cb28580903fb%2C41233%2C1731457759323.meta.1731457760430.meta java.lang.reflect.InvocationTargetException: null
2024-11-13T00:30:14,519 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-13T00:30:14,519 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-13T00:30:14,519 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-13T00:30:14,520 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-13T00:30:14,520 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-13T00:30:14,520 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-13T00:30:14,523 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-13T00:30:14,523 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-13T00:30:14,523 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-13T00:30:14,525 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-13T00:30:14,671 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/MasterData/WALs/cb28580903fb,32917,1731457759225/cb28580903fb%2C32917%2C1731457759225.1731457759520 java.lang.reflect.InvocationTargetException: null
2024-11-13T00:30:14,935 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,35015,1731457760593/cb28580903fb%2C35015%2C1731457760593.1731457760827 java.lang.reflect.InvocationTargetException: null
2024-11-13T00:30:14,940 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,41233,1731457759323/cb28580903fb%2C41233%2C1731457759323.meta.1731457760430.meta java.lang.reflect.InvocationTargetException: null
2024-11-13T00:30:15,029 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties
2024-11-13T00:30:15,042 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-13T00:30:15,043 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-13T00:30:15,043 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-13T00:30:15,044 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-13T00:30:15,044 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-13T00:30:15,044 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-13T00:30:15,047 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-13T00:30:15,048 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-13T00:30:15,048 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-13T00:30:15,050 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-13T00:30:15,671 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/MasterData/WALs/cb28580903fb,32917,1731457759225/cb28580903fb%2C32917%2C1731457759225.1731457759520 java.lang.reflect.InvocationTargetException: null
2024-11-13T00:30:15,936 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,35015,1731457760593/cb28580903fb%2C35015%2C1731457760593.1731457760827 java.lang.reflect.InvocationTargetException: null
2024-11-13T00:30:15,940 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,41233,1731457759323/cb28580903fb%2C41233%2C1731457759323.meta.1731457760430.meta java.lang.reflect.InvocationTargetException: null
2024-11-13T00:30:16,672 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/MasterData/WALs/cb28580903fb,32917,1731457759225/cb28580903fb%2C32917%2C1731457759225.1731457759520 java.lang.reflect.InvocationTargetException: null
2024-11-13T00:30:16,717 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta'
2024-11-13T00:30:16,717 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRollOnPipelineRestart'
2024-11-13T00:30:16,936 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,35015,1731457760593/cb28580903fb%2C35015%2C1731457760593.1731457760827 java.lang.reflect.InvocationTargetException: null
2024-11-13T00:30:16,941 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,41233,1731457759323/cb28580903fb%2C41233%2C1731457759323.meta.1731457760430.meta java.lang.reflect.InvocationTargetException: null
2024-11-13T00:30:17,673 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/MasterData/WALs/cb28580903fb,32917,1731457759225/cb28580903fb%2C32917%2C1731457759225.1731457759520 java.lang.reflect.InvocationTargetException: null
2024-11-13T00:30:17,937 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,35015,1731457760593/cb28580903fb%2C35015%2C1731457760593.1731457760827 java.lang.reflect.InvocationTargetException: null
2024-11-13T00:30:17,941 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,41233,1731457759323/cb28580903fb%2C41233%2C1731457759323.meta.1731457760430.meta java.lang.reflect.InvocationTargetException: null
2024-11-13T00:30:18,673 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/MasterData/WALs/cb28580903fb,32917,1731457759225/cb28580903fb%2C32917%2C1731457759225.1731457759520 java.lang.reflect.InvocationTargetException: null
2024-11-13T00:30:18,938 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,35015,1731457760593/cb28580903fb%2C35015%2C1731457760593.1731457760827 java.lang.reflect.InvocationTargetException: null
2024-11-13T00:30:18,942 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,41233,1731457759323/cb28580903fb%2C41233%2C1731457759323.meta.1731457760430.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:30:19,509 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-13T00:30:19,509 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-13T00:30:19,510 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart 2024-11-13T00:30:19,510 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart Metrics about Tables on a single HBase RegionServer 2024-11-13T00:30:19,511 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-13T00:30:19,511 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-13T00:30:19,511 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-13T00:30:19,511 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-11-13T00:30:19,674 WARN [Close-WAL-Writer-0 {}] 
util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/MasterData/WALs/cb28580903fb,32917,1731457759225/cb28580903fb%2C32917%2C1731457759225.1731457759520 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:30:19,938 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,35015,1731457760593/cb28580903fb%2C35015%2C1731457760593.1731457760827 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:30:19,942 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,41233,1731457759323/cb28580903fb%2C41233%2C1731457759323.meta.1731457760430.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-13T00:30:20,675 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/MasterData/WALs/cb28580903fb,32917,1731457759225/cb28580903fb%2C32917%2C1731457759225.1731457759520 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:30:20,939 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,35015,1731457760593/cb28580903fb%2C35015%2C1731457760593.1731457760827 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:30:20,943 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,41233,1731457759323/cb28580903fb%2C41233%2C1731457759323.meta.1731457760430.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more
2024-11-13T00:30:21,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46601 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4
2024-11-13T00:30:21,455 INFO [RPCClient-NioEventLoopGroup-4-15 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRollOnPipelineRestart completed
2024-11-13T00:30:21,455 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRollOnPipelineRestart,, stopping at row=TestLogRolling-testLogRollOnPipelineRestart ,, for max=2147483647 with caching=100
2024-11-13T00:30:21,458 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRollOnPipelineRestart
2024-11-13T00:30:21,458 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRollOnPipelineRestart,,1731457811395.68dec2c8fdcb5fec0c289698d8880a8a.
2024-11-13T00:30:21,461 DEBUG [RPCClient-NioEventLoopGroup-4-14 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRollOnPipelineRestart', row='row1002', locateType=CURRENT is [region=TestLogRolling-testLogRollOnPipelineRestart,,1731457811395.68dec2c8fdcb5fec0c289698d8880a8a., hostname=cb28580903fb,42117,1731457810344, seqNum=2]
2024-11-13T00:30:21,676 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/MasterData/WALs/cb28580903fb,32917,1731457759225/cb28580903fb%2C32917%2C1731457759225.1731457759520 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:30:21,940 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,35015,1731457760593/cb28580903fb%2C35015%2C1731457760593.1731457760827 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:30:21,943 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,41233,1731457759323/cb28580903fb%2C41233%2C1731457759323.meta.1731457760430.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:30:22,676 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/MasterData/WALs/cb28580903fb,32917,1731457759225/cb28580903fb%2C32917%2C1731457759225.1731457759520 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:30:22,941 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,35015,1731457760593/cb28580903fb%2C35015%2C1731457760593.1731457760827 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:30:22,944 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,41233,1731457759323/cb28580903fb%2C41233%2C1731457759323.meta.1731457760430.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more
2024-11-13T00:30:23,464 INFO [Time-limited test {}] wal.TestLogRolling(360): log.getCurrentFileName()): hdfs://localhost:43143/user/jenkins/test-data/68f23f88-b9bc-298c-0df7-cd8f2616a41b/WALs/cb28580903fb,42117,1731457810344/cb28580903fb%2C42117%2C1731457810344.1731457810865
2024-11-13T00:30:23,464 WARN [ResponseProcessor for block BP-472515239-172.17.0.2-1731457809380:blk_1073741833_1009 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-472515239-172.17.0.2-1731457809380:blk_1073741833_1009 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-13T00:30:23,464 WARN [ResponseProcessor for block BP-472515239-172.17.0.2-1731457809380:blk_1073741834_1010 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-472515239-172.17.0.2-1731457809380:blk_1073741834_1010 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?]
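Note on the repeated "Failed invocation ... Filesystem closed" warnings above: the stack traces show RecoverLeaseFSUtils probing DistributedFileSystem.isFileClosed through reflection (Method.invoke) while recovering the lease on the old WAL files, and every probe fails because the DFSClient behind those paths has already been shut down, so the InvocationTargetException wraps "java.io.IOException: Filesystem closed" and the probe is retried roughly once per second. A minimal Java sketch of that kind of reflective probe is below; it is an illustration only, not the HBase implementation, and everything other than the Hadoop calls it uses (DistributedFileSystem#isFileClosed, Method#invoke) is a made-up name for the example.

    import java.lang.reflect.InvocationTargetException;
    import java.lang.reflect.Method;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public final class IsFileClosedProbe {   // hypothetical helper, not part of HBase
      // Polls DistributedFileSystem#isFileClosed via reflection and returns true once the
      // file is closed (lease recovered), or false when the deadline passes or the probe
      // cannot be made to work on this FileSystem implementation.
      static boolean waitUntilClosed(FileSystem fs, Path wal, long timeoutMs) throws InterruptedException {
        long deadline = System.currentTimeMillis() + timeoutMs;
        while (System.currentTimeMillis() < deadline) {
          if (fs instanceof DistributedFileSystem) {
            try {
              Method isFileClosed = fs.getClass().getMethod("isFileClosed", Path.class);
              if ((Boolean) isFileClosed.invoke(fs, wal)) {
                return true;             // file closed, lease recovery finished
              }
            } catch (InvocationTargetException e) {
              // The target threw, e.g. java.io.IOException: Filesystem closed, which is
              // exactly what the warnings above report; keep retrying until the timeout.
            } catch (ReflectiveOperationException e) {
              return false;              // isFileClosed not available here
            }
          }
          Thread.sleep(1000L);           // the log above retries about once per second
        }
        return false;
      }
    }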
2024-11-13T00:30:23,465 WARN [ResponseProcessor for block BP-472515239-172.17.0.2-1731457809380:blk_1073741830_1006 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-472515239-172.17.0.2-1731457809380:blk_1073741830_1006 java.io.IOException: Bad response ERROR for BP-472515239-172.17.0.2-1731457809380:blk_1073741830_1006 from datanode DatanodeInfoWithStorage[127.0.0.1:46725,DS-5958543e-56a4-44a7-9bfc-b852d1a631b3,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-13T00:30:23,465 WARN [DataStreamer for file /user/jenkins/test-data/68f23f88-b9bc-298c-0df7-cd8f2616a41b/MasterData/WALs/cb28580903fb,46601,1731457810282/cb28580903fb%2C46601%2C1731457810282.1731457810495 block BP-472515239-172.17.0.2-1731457809380:blk_1073741830_1006 {}] hdfs.DataStreamer(1731): Error Recovery for BP-472515239-172.17.0.2-1731457809380:blk_1073741830_1006 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36659,DS-90d4e24e-d0f9-4947-abd1-b96dac04a6f2,DISK], DatanodeInfoWithStorage[127.0.0.1:46725,DS-5958543e-56a4-44a7-9bfc-b852d1a631b3,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:46725,DS-5958543e-56a4-44a7-9bfc-b852d1a631b3,DISK]) is bad.
2024-11-13T00:30:23,465 WARN [DataStreamer for file /user/jenkins/test-data/68f23f88-b9bc-298c-0df7-cd8f2616a41b/WALs/cb28580903fb,42117,1731457810344/cb28580903fb%2C42117%2C1731457810344.meta.1731457811212.meta block BP-472515239-172.17.0.2-1731457809380:blk_1073741834_1010 {}] hdfs.DataStreamer(1731): Error Recovery for BP-472515239-172.17.0.2-1731457809380:blk_1073741834_1010 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46725,DS-5958543e-56a4-44a7-9bfc-b852d1a631b3,DISK], DatanodeInfoWithStorage[127.0.0.1:36659,DS-90d4e24e-d0f9-4947-abd1-b96dac04a6f2,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:46725,DS-5958543e-56a4-44a7-9bfc-b852d1a631b3,DISK]) is bad.
2024-11-13T00:30:23,465 WARN [DataStreamer for file /user/jenkins/test-data/68f23f88-b9bc-298c-0df7-cd8f2616a41b/WALs/cb28580903fb,42117,1731457810344/cb28580903fb%2C42117%2C1731457810344.1731457810865 block BP-472515239-172.17.0.2-1731457809380:blk_1073741833_1009 {}] hdfs.DataStreamer(1731): Error Recovery for BP-472515239-172.17.0.2-1731457809380:blk_1073741833_1009 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46725,DS-5958543e-56a4-44a7-9bfc-b852d1a631b3,DISK], DatanodeInfoWithStorage[127.0.0.1:36659,DS-90d4e24e-d0f9-4947-abd1-b96dac04a6f2,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:46725,DS-5958543e-56a4-44a7-9bfc-b852d1a631b3,DISK]) is bad.
2024-11-13T00:30:23,465 WARN [PacketResponder: BP-472515239-172.17.0.2-1731457809380:blk_1073741830_1006, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:46725] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?]
at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T00:30:23,465 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-50605262_22 at /127.0.0.1:33214 [Receiving block BP-472515239-172.17.0.2-1731457809380:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:46725:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:33214 dst: /127.0.0.1:46725 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
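The "Error Recovery ... datanode ... is bad" entries above and the DataXceiver WRITE_BLOCK errors around them are two views of the same event: the client-side DataStreamer drops the failed datanode from the write pipeline, while the datanodes see their end of the stream torn down (ClosedChannelException on one side, "Premature EOF from inputStream" on the other). Whether the client then asks for a replacement datanode is governed by standard HDFS client settings; the sketch below only lists those configuration keys with what appear to be their stock defaults, it is not taken from this test's configuration, and the class name is invented for the example.

    import org.apache.hadoop.conf.Configuration;

    public final class PipelineFailureConf {   // hypothetical holder class for the example
      static Configuration defaults() {
        Configuration conf = new Configuration();
        // Allow the writer to request a replacement datanode after a pipeline member is dropped.
        conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.enable", true);
        // The DEFAULT policy only replaces nodes for pipelines with 3+ replicas, so a
        // 2-datanode mini cluster like this one normally just continues on the survivor.
        conf.set("dfs.client.block.write.replace-datanode-on-failure.policy", "DEFAULT");
        // If set true, keep writing even when no replacement can be found instead of failing the stream.
        conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.best-effort", false);
        return conf;
      }
    }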
2024-11-13T00:30:23,466 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-50605262_22 at /127.0.0.1:33204 [Receiving block BP-472515239-172.17.0.2-1731457809380:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:46725:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:33204 dst: /127.0.0.1:46725 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T00:30:23,466 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-50605262_22 at /127.0.0.1:45986 [Receiving block BP-472515239-172.17.0.2-1731457809380:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:36659:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:45986 dst: /127.0.0.1:36659 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T00:30:23,466 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_787416943_22 at /127.0.0.1:58172 [Receiving block BP-472515239-172.17.0.2-1731457809380:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:36659:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:58172 dst: /127.0.0.1:36659 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T00:30:23,466 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_787416943_22 at /127.0.0.1:47126 [Receiving block BP-472515239-172.17.0.2-1731457809380:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:46725:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:47126 dst: /127.0.0.1:46725 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] 
at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T00:30:23,467 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-50605262_22 at /127.0.0.1:45980 [Receiving block BP-472515239-172.17.0.2-1731457809380:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:36659:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:45980 dst: /127.0.0.1:36659 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
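The entries that follow show one embedded datanode being torn down (its Jetty datanode webapp and block pool service stopping) and a datanode webapp starting again a moment later, which is the pipeline restart that gives TestLogRolling-testLogRollOnPipelineRestart its name. Below is a rough sketch of forcing the same situation against a bare MiniDFSCluster; it is illustrative only and is not the HBase test code, which drives the mini cluster through its own testing utility.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.MiniDFSCluster;

    public final class PipelineRestartSketch {   // hypothetical example class
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
        try {
          cluster.waitActive();
          // Stop one datanode while a writer (such as a WAL) still has an open pipeline
          // through it; the client side then logs "Error Recovery ... datanode ... is bad".
          MiniDFSCluster.DataNodeProperties stopped = cluster.stopDataNode(0);
          // ... the code under test keeps writing and rolls its log here ...
          // Bring the same datanode back, forcing new pipelines to be set up.
          cluster.restartDataNode(stopped, true);
          cluster.waitActive();
        } finally {
          cluster.shutdown();
        }
      }
    }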
2024-11-13T00:30:23,468 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@39cbe06c{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-13T00:30:23,468 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@78776860{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-13T00:30:23,468 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-13T00:30:23,468 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@41798127{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-13T00:30:23,469 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7e3a0c4e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/82264a5e-14d0-268b-aec5-074583ae0e1c/hadoop.log.dir/,STOPPED} 2024-11-13T00:30:23,470 WARN [BP-472515239-172.17.0.2-1731457809380 heartbeating to localhost/127.0.0.1:43143 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-13T00:30:23,471 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-13T00:30:23,471 WARN [BP-472515239-172.17.0.2-1731457809380 heartbeating to localhost/127.0.0.1:43143 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-472515239-172.17.0.2-1731457809380 (Datanode Uuid 98a15cd0-243c-422e-9014-af5f2d458c5f) service to localhost/127.0.0.1:43143 2024-11-13T00:30:23,471 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-13T00:30:23,471 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/82264a5e-14d0-268b-aec5-074583ae0e1c/cluster_53488b2f-acaa-1518-ad8c-418752e5a340/data/data3/current/BP-472515239-172.17.0.2-1731457809380 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-13T00:30:23,471 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/82264a5e-14d0-268b-aec5-074583ae0e1c/cluster_53488b2f-acaa-1518-ad8c-418752e5a340/data/data4/current/BP-472515239-172.17.0.2-1731457809380 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-13T00:30:23,471 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-13T00:30:23,481 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-13T00:30:23,484 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-13T00:30:23,485 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-13T00:30:23,485 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-13T00:30:23,485 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-13T00:30:23,485 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3c978b57{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/82264a5e-14d0-268b-aec5-074583ae0e1c/hadoop.log.dir/,AVAILABLE} 2024-11-13T00:30:23,486 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6acddf16{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-13T00:30:23,605 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@134817c5{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/82264a5e-14d0-268b-aec5-074583ae0e1c/java.io.tmpdir/jetty-localhost-38491-hadoop-hdfs-3_4_1-tests_jar-_-any-47065102205988589/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-13T00:30:23,606 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@70f21c85{HTTP/1.1, (http/1.1)}{localhost:38491} 2024-11-13T00:30:23,606 INFO [Time-limited test {}] server.Server(415): Started @169454ms 2024-11-13T00:30:23,608 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-13T00:30:23,639 WARN [ResponseProcessor for block BP-472515239-172.17.0.2-1731457809380:blk_1073741833_1014 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-472515239-172.17.0.2-1731457809380:blk_1073741833_1014 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T00:30:23,639 WARN [ResponseProcessor for block BP-472515239-172.17.0.2-1731457809380:blk_1073741830_1013 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-472515239-172.17.0.2-1731457809380:blk_1073741830_1013 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T00:30:23,639 WARN [ResponseProcessor for block BP-472515239-172.17.0.2-1731457809380:blk_1073741834_1015 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-472515239-172.17.0.2-1731457809380:blk_1073741834_1015 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T00:30:23,640 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_787416943_22 at /127.0.0.1:45956 [Receiving block BP-472515239-172.17.0.2-1731457809380:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:36659:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:45956 dst: /127.0.0.1:36659 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-13T00:30:23,640 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-50605262_22 at /127.0.0.1:45982 [Receiving block BP-472515239-172.17.0.2-1731457809380:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:36659:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:45982 dst: /127.0.0.1:36659 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T00:30:23,641 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-50605262_22 at /127.0.0.1:45972 [Receiving block BP-472515239-172.17.0.2-1731457809380:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:36659:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:45972 dst: /127.0.0.1:36659 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] 
at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T00:30:23,648 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4c5ef86e{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-13T00:30:23,649 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5a2f7922{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-13T00:30:23,649 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-13T00:30:23,649 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@f2f2a70{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-13T00:30:23,649 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@76b21b83{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/82264a5e-14d0-268b-aec5-074583ae0e1c/hadoop.log.dir/,STOPPED} 2024-11-13T00:30:23,651 WARN [BP-472515239-172.17.0.2-1731457809380 heartbeating to localhost/127.0.0.1:43143 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-13T00:30:23,651 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-13T00:30:23,651 WARN [BP-472515239-172.17.0.2-1731457809380 heartbeating to localhost/127.0.0.1:43143 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-472515239-172.17.0.2-1731457809380 (Datanode Uuid 3eab685d-3b03-4ec1-ba04-2c0b82a3601d) service to localhost/127.0.0.1:43143 2024-11-13T00:30:23,651 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-13T00:30:23,655 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/82264a5e-14d0-268b-aec5-074583ae0e1c/cluster_53488b2f-acaa-1518-ad8c-418752e5a340/data/data1/current/BP-472515239-172.17.0.2-1731457809380 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-13T00:30:23,656 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/82264a5e-14d0-268b-aec5-074583ae0e1c/cluster_53488b2f-acaa-1518-ad8c-418752e5a340/data/data2/current/BP-472515239-172.17.0.2-1731457809380 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-13T00:30:23,656 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-13T00:30:23,671 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-13T00:30:23,676 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-13T00:30:23,677 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/MasterData/WALs/cb28580903fb,32917,1731457759225/cb28580903fb%2C32917%2C1731457759225.1731457759520 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:30:23,680 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-13T00:30:23,680 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-13T00:30:23,681 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-13T00:30:23,685 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@70db9781{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/82264a5e-14d0-268b-aec5-074583ae0e1c/hadoop.log.dir/,AVAILABLE} 2024-11-13T00:30:23,685 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5571dd44{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-13T00:30:23,739 WARN [Thread-1357 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-13T00:30:23,742 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x3afd7e71dfb893d9 with lease ID 0xd633080cf6eef92b: from storage DS-5958543e-56a4-44a7-9bfc-b852d1a631b3 node DatanodeRegistration(127.0.0.1:42125, datanodeUuid=98a15cd0-243c-422e-9014-af5f2d458c5f, infoPort=37683, infoSecurePort=0, ipcPort=42175, storageInfo=lv=-57;cid=testClusterID;nsid=235753840;c=1731457809380), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-13T00:30:23,742 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x3afd7e71dfb893d9 with lease ID 0xd633080cf6eef92b: from storage DS-8f22ec6a-74aa-4ab5-8936-386e88cfa4aa node DatanodeRegistration(127.0.0.1:42125, datanodeUuid=98a15cd0-243c-422e-9014-af5f2d458c5f, infoPort=37683, infoSecurePort=0, ipcPort=42175, storageInfo=lv=-57;cid=testClusterID;nsid=235753840;c=1731457809380), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-13T00:30:23,812 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@63b7b5cb{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/82264a5e-14d0-268b-aec5-074583ae0e1c/java.io.tmpdir/jetty-localhost-46099-hadoop-hdfs-3_4_1-tests_jar-_-any-9496512338573230256/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-13T00:30:23,813 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@20126009{HTTP/1.1, (http/1.1)}{localhost:46099} 
2024-11-13T00:30:23,813 INFO [Time-limited test {}] server.Server(415): Started @169661ms 2024-11-13T00:30:23,814 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-13T00:30:23,941 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,35015,1731457760593/cb28580903fb%2C35015%2C1731457760593.1731457760827 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:30:23,944 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,41233,1731457759323/cb28580903fb%2C41233%2C1731457759323.meta.1731457760430.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:30:23,954 WARN [Thread-1388 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-13T00:30:23,957 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x5624eb869abcd487 with lease ID 0xd633080cf6eef92c: from storage DS-90d4e24e-d0f9-4947-abd1-b96dac04a6f2 node DatanodeRegistration(127.0.0.1:46037, datanodeUuid=3eab685d-3b03-4ec1-ba04-2c0b82a3601d, infoPort=42505, infoSecurePort=0, ipcPort=37015, storageInfo=lv=-57;cid=testClusterID;nsid=235753840;c=1731457809380), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-13T00:30:23,957 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x5624eb869abcd487 with lease ID 0xd633080cf6eef92c: from storage DS-03e2dfb5-b531-443c-90c1-b53d6d85bb99 node DatanodeRegistration(127.0.0.1:46037, datanodeUuid=3eab685d-3b03-4ec1-ba04-2c0b82a3601d, infoPort=42505, infoSecurePort=0, ipcPort=37015, storageInfo=lv=-57;cid=testClusterID;nsid=235753840;c=1731457809380), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-13T00:30:24,678 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/MasterData/WALs/cb28580903fb,32917,1731457759225/cb28580903fb%2C32917%2C1731457759225.1731457759520 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:30:24,839 INFO [Time-limited test {}] wal.TestLogRolling(372): Data Nodes restarted 2024-11-13T00:30:24,841 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1002 2024-11-13T00:30:24,842 ERROR [FSHLog-0-hdfs://localhost:43143/user/jenkins/test-data/68f23f88-b9bc-298c-0df7-cd8f2616a41b-prefix:cb28580903fb,42117,1731457810344 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36659,DS-90d4e24e-d0f9-4947-abd1-b96dac04a6f2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T00:30:24,842 WARN [FSHLog-0-hdfs://localhost:43143/user/jenkins/test-data/68f23f88-b9bc-298c-0df7-cd8f2616a41b-prefix:cb28580903fb,42117,1731457810344 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36659,DS-90d4e24e-d0f9-4947-abd1-b96dac04a6f2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T00:30:24,843 DEBUG [regionserver/cb28580903fb:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog cb28580903fb%2C42117%2C1731457810344:(num 1731457810865) roll requested 2024-11-13T00:30:24,843 INFO [regionserver/cb28580903fb:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor cb28580903fb%2C42117%2C1731457810344.1731457824843 2024-11-13T00:30:24,849 DEBUG [regionserver/cb28580903fb:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:43143/user/jenkins/test-data/68f23f88-b9bc-298c-0df7-cd8f2616a41b/WALs/cb28580903fb,42117,1731457810344/cb28580903fb%2C42117%2C1731457810344.1731457810865 newFile=hdfs://localhost:43143/user/jenkins/test-data/68f23f88-b9bc-298c-0df7-cd8f2616a41b/WALs/cb28580903fb,42117,1731457810344/cb28580903fb%2C42117%2C1731457810344.1731457824843 2024-11-13T00:30:24,849 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:30:24,849 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:30:24,849 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:30:24,849 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:30:24,849 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:30:24,849 INFO [regionserver/cb28580903fb:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/68f23f88-b9bc-298c-0df7-cd8f2616a41b/WALs/cb28580903fb,42117,1731457810344/cb28580903fb%2C42117%2C1731457810344.1731457810865 with entries=2, filesize=1.59 KB; new WAL /user/jenkins/test-data/68f23f88-b9bc-298c-0df7-cd8f2616a41b/WALs/cb28580903fb,42117,1731457810344/cb28580903fb%2C42117%2C1731457810344.1731457824843 2024-11-13T00:30:24,850 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36659,DS-90d4e24e-d0f9-4947-abd1-b96dac04a6f2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T00:30:24,850 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36659,DS-90d4e24e-d0f9-4947-abd1-b96dac04a6f2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T00:30:24,850 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:43143/user/jenkins/test-data/68f23f88-b9bc-298c-0df7-cd8f2616a41b/WALs/cb28580903fb,42117,1731457810344/cb28580903fb%2C42117%2C1731457810344.1731457810865 2024-11-13T00:30:24,850 DEBUG [regionserver/cb28580903fb:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37683:37683),(127.0.0.1/127.0.0.1:42505:42505)] 2024-11-13T00:30:24,851 DEBUG [regionserver/cb28580903fb:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:43143/user/jenkins/test-data/68f23f88-b9bc-298c-0df7-cd8f2616a41b/WALs/cb28580903fb,42117,1731457810344/cb28580903fb%2C42117%2C1731457810344.1731457810865 is not closed yet, will try archiving it next time 2024-11-13T00:30:24,851 WARN [IPC Server handler 1 on default port 43143 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/68f23f88-b9bc-298c-0df7-cd8f2616a41b/WALs/cb28580903fb,42117,1731457810344/cb28580903fb%2C42117%2C1731457810344.1731457810865 has not been closed. Lease recovery is in progress. RecoveryId = 1017 for block blk_1073741833_1014 2024-11-13T00:30:24,851 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:43143/user/jenkins/test-data/68f23f88-b9bc-298c-0df7-cd8f2616a41b/WALs/cb28580903fb,42117,1731457810344/cb28580903fb%2C42117%2C1731457810344.1731457810865 after 1ms 2024-11-13T00:30:24,942 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,35015,1731457760593/cb28580903fb%2C35015%2C1731457760593.1731457760827 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:30:24,945 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,41233,1731457759323/cb28580903fb%2C41233%2C1731457759323.meta.1731457760430.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:30:25,678 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/MasterData/WALs/cb28580903fb,32917,1731457759225/cb28580903fb%2C32917%2C1731457759225.1731457759520 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:30:25,942 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,35015,1731457760593/cb28580903fb%2C35015%2C1731457760593.1731457760827 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:30:25,945 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,41233,1731457759323/cb28580903fb%2C41233%2C1731457759323.meta.1731457760430.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:30:26,679 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/MasterData/WALs/cb28580903fb,32917,1731457759225/cb28580903fb%2C32917%2C1731457759225.1731457759520 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:30:26,854 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1003 2024-11-13T00:30:26,943 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,35015,1731457760593/cb28580903fb%2C35015%2C1731457760593.1731457760827 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:30:26,946 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,41233,1731457759323/cb28580903fb%2C41233%2C1731457759323.meta.1731457760430.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:30:27,680 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/MasterData/WALs/cb28580903fb,32917,1731457759225/cb28580903fb%2C32917%2C1731457759225.1731457759520 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:30:27,741 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741833_1014: GenerationStamp not matched, existing replica is blk_1073741833_1009 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 
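Editor's note: the recurring "Failed invocation ... Caused by: java.io.IOException: Filesystem closed" warnings above come from the Close-WAL-Writer-0 thread, which keeps polling lease recovery (roughly once per second, per the timestamps) on WAL files whose underlying DFSClient for hdfs://localhost:34283 has already been shut down, so each isFileClosed probe fails and is retried. The following is only a minimal sketch of that polling pattern, not the actual RecoverLeaseFSUtils code (which uses reflection and configurable pauses); the loop shape, timeout handling, and class name here are assumptions for illustration.

// Minimal sketch of a lease-recovery polling loop like the one RecoverLeaseFSUtils runs.
// Assumptions: simplified timing and error handling; not the HBase implementation itself.
import java.io.IOException;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public final class LeaseRecoverySketch {
  static boolean recoverLeaseWithRetries(DistributedFileSystem dfs, Path wal, long timeoutMs)
      throws InterruptedException {
    long deadline = System.currentTimeMillis() + timeoutMs;
    while (System.currentTimeMillis() < deadline) {
      try {
        if (dfs.recoverLease(wal)) {  // ask the NameNode to start/complete lease recovery
          return true;                // corresponds to "Recovered lease, attempt=N" entries
        }
        if (dfs.isFileClosed(wal)) {  // the same probe that fails with "Filesystem closed" above
          return true;                // file already closed, nothing left to recover
        }
      } catch (IOException e) {
        // matches the "Failed invocation for hdfs://..." warnings: swallow and retry
      }
      Thread.sleep(1000L);            // the log shows attempts spaced roughly one second apart
    }
    return false;
  }
}

In this run the warnings appear to be benign leftovers: the mini-cluster behind port 34283 is already stopped, so these recovery attempts can only time out while the active test (port 43143) proceeds.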
2024-11-13T00:30:27,944 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,35015,1731457760593/cb28580903fb%2C35015%2C1731457760593.1731457760827 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:30:27,946 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,41233,1731457759323/cb28580903fb%2C41233%2C1731457759323.meta.1731457760430.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:30:28,680 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/MasterData/WALs/cb28580903fb,32917,1731457759225/cb28580903fb%2C32917%2C1731457759225.1731457759520 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-13T00:30:28,852 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:43143/user/jenkins/test-data/68f23f88-b9bc-298c-0df7-cd8f2616a41b/WALs/cb28580903fb,42117,1731457810344/cb28580903fb%2C42117%2C1731457810344.1731457810865 after 4002ms 2024-11-13T00:30:28,857 WARN [ResponseProcessor for block BP-472515239-172.17.0.2-1731457809380:blk_1073741837_1016 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-472515239-172.17.0.2-1731457809380:blk_1073741837_1016 java.io.IOException: Bad response ERROR for BP-472515239-172.17.0.2-1731457809380:blk_1073741837_1016 from datanode DatanodeInfoWithStorage[127.0.0.1:46037,DS-90d4e24e-d0f9-4947-abd1-b96dac04a6f2,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T00:30:28,857 WARN [DataStreamer for file /user/jenkins/test-data/68f23f88-b9bc-298c-0df7-cd8f2616a41b/WALs/cb28580903fb,42117,1731457810344/cb28580903fb%2C42117%2C1731457810344.1731457824843 block BP-472515239-172.17.0.2-1731457809380:blk_1073741837_1016 {}] hdfs.DataStreamer(1731): Error Recovery for BP-472515239-172.17.0.2-1731457809380:blk_1073741837_1016 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42125,DS-5958543e-56a4-44a7-9bfc-b852d1a631b3,DISK], DatanodeInfoWithStorage[127.0.0.1:46037,DS-90d4e24e-d0f9-4947-abd1-b96dac04a6f2,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:46037,DS-90d4e24e-d0f9-4947-abd1-b96dac04a6f2,DISK]) is bad. 2024-11-13T00:30:28,857 WARN [PacketResponder: BP-472515239-172.17.0.2-1731457809380:blk_1073741837_1016, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:46037] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-13T00:30:28,857 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-50605262_22 at /127.0.0.1:59372 [Receiving block BP-472515239-172.17.0.2-1731457809380:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:42125:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:59372 dst: /127.0.0.1:42125 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T00:30:28,858 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-50605262_22 at /127.0.0.1:48922 [Receiving block BP-472515239-172.17.0.2-1731457809380:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:46037:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:48922 dst: /127.0.0.1:46037 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T00:30:28,859 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@63b7b5cb{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-13T00:30:28,859 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@20126009{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-13T00:30:28,860 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-13T00:30:28,860 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5571dd44{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-13T00:30:28,860 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@70db9781{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/82264a5e-14d0-268b-aec5-074583ae0e1c/hadoop.log.dir/,STOPPED} 2024-11-13T00:30:28,861 WARN [BP-472515239-172.17.0.2-1731457809380 heartbeating to localhost/127.0.0.1:43143 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-13T00:30:28,861 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
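Editor's note: the preceding entries show the write pipeline for blk_1073741837 losing datanode 127.0.0.1:46037 ("datanode 1(...) is bad") while the test stops and restarts data nodes. Whether the DFSClient replaces a failed pipeline node or keeps writing with the survivors is controlled by the standard replace-datanode-on-failure client settings; the values below are an illustrative sketch only and are an assumption, since this log does not show what the test actually configures.

// Sketch of the HDFS client knobs that govern pipeline recovery after a datanode failure.
// Assumption: example values, not necessarily what this test sets.
import org.apache.hadoop.conf.Configuration;

public final class PipelineFailureConfigSketch {
  static Configuration pipelineFailureTolerantConf() {
    Configuration conf = new Configuration();
    // Allow the DFSClient to keep the output stream alive after a pipeline node fails.
    conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.enable", true);
    // DEFAULT only replaces failed nodes for larger pipelines; small mini-cluster pipelines
    // are sometimes run with NEVER so a two-node pipeline can continue on one survivor.
    conf.set("dfs.client.block.write.replace-datanode-on-failure.policy", "DEFAULT");
    // Best effort: if no replacement datanode is available, continue instead of failing.
    conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.best-effort", true);
    return conf;
  }
}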
2024-11-13T00:30:28,861 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-13T00:30:28,861 WARN [BP-472515239-172.17.0.2-1731457809380 heartbeating to localhost/127.0.0.1:43143 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-472515239-172.17.0.2-1731457809380 (Datanode Uuid 3eab685d-3b03-4ec1-ba04-2c0b82a3601d) service to localhost/127.0.0.1:43143 2024-11-13T00:30:28,862 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/82264a5e-14d0-268b-aec5-074583ae0e1c/cluster_53488b2f-acaa-1518-ad8c-418752e5a340/data/data1/current/BP-472515239-172.17.0.2-1731457809380 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-13T00:30:28,862 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/82264a5e-14d0-268b-aec5-074583ae0e1c/cluster_53488b2f-acaa-1518-ad8c-418752e5a340/data/data2/current/BP-472515239-172.17.0.2-1731457809380 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-13T00:30:28,862 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-13T00:30:28,876 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-13T00:30:28,879 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-13T00:30:28,880 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-13T00:30:28,880 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-13T00:30:28,880 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-13T00:30:28,880 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4e617473{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/82264a5e-14d0-268b-aec5-074583ae0e1c/hadoop.log.dir/,AVAILABLE} 2024-11-13T00:30:28,880 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4e5c1ba8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-13T00:30:28,944 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,35015,1731457760593/cb28580903fb%2C35015%2C1731457760593.1731457760827 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:30:28,947 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,41233,1731457759323/cb28580903fb%2C41233%2C1731457759323.meta.1731457760430.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:30:28,996 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@53bd4263{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/82264a5e-14d0-268b-aec5-074583ae0e1c/java.io.tmpdir/jetty-localhost-42095-hadoop-hdfs-3_4_1-tests_jar-_-any-15381182558034171225/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-13T00:30:28,996 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@60ff2da2{HTTP/1.1, (http/1.1)}{localhost:42095} 2024-11-13T00:30:28,996 INFO [Time-limited test {}] server.Server(415): Started @174844ms 2024-11-13T00:30:28,998 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-13T00:30:29,014 WARN [ResponseProcessor for block BP-472515239-172.17.0.2-1731457809380:blk_1073741837_1018 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-472515239-172.17.0.2-1731457809380:blk_1073741837_1018 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T00:30:29,015 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-50605262_22 at /127.0.0.1:59394 [Receiving block BP-472515239-172.17.0.2-1731457809380:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:42125:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:59394 dst: /127.0.0.1:42125 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T00:30:29,017 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@134817c5{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-13T00:30:29,018 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@70f21c85{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-13T00:30:29,018 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-13T00:30:29,018 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6acddf16{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-13T00:30:29,018 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3c978b57{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/82264a5e-14d0-268b-aec5-074583ae0e1c/hadoop.log.dir/,STOPPED} 2024-11-13T00:30:29,020 WARN [BP-472515239-172.17.0.2-1731457809380 heartbeating to localhost/127.0.0.1:43143 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-13T00:30:29,020 WARN [BP-472515239-172.17.0.2-1731457809380 heartbeating to localhost/127.0.0.1:43143 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-472515239-172.17.0.2-1731457809380 (Datanode Uuid 98a15cd0-243c-422e-9014-af5f2d458c5f) service to localhost/127.0.0.1:43143 2024-11-13T00:30:29,020 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-13T00:30:29,020 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-13T00:30:29,020 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/82264a5e-14d0-268b-aec5-074583ae0e1c/cluster_53488b2f-acaa-1518-ad8c-418752e5a340/data/data3/current/BP-472515239-172.17.0.2-1731457809380 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-13T00:30:29,021 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/82264a5e-14d0-268b-aec5-074583ae0e1c/cluster_53488b2f-acaa-1518-ad8c-418752e5a340/data/data4/current/BP-472515239-172.17.0.2-1731457809380 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-13T00:30:29,021 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-13T00:30:29,030 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-13T00:30:29,033 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-13T00:30:29,034 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-13T00:30:29,034 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-13T00:30:29,035 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-13T00:30:29,036 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@10a15b18{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/82264a5e-14d0-268b-aec5-074583ae0e1c/hadoop.log.dir/,AVAILABLE} 2024-11-13T00:30:29,037 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4dfe1abd{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-13T00:30:29,089 WARN [Thread-1431 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-13T00:30:29,091 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xfead795da0bbffa5 with lease ID 0xd633080cf6eef92d: from storage DS-90d4e24e-d0f9-4947-abd1-b96dac04a6f2 node DatanodeRegistration(127.0.0.1:42603, datanodeUuid=3eab685d-3b03-4ec1-ba04-2c0b82a3601d, infoPort=41907, infoSecurePort=0, ipcPort=34989, storageInfo=lv=-57;cid=testClusterID;nsid=235753840;c=1731457809380), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-13T00:30:29,091 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xfead795da0bbffa5 with lease ID 0xd633080cf6eef92d: from storage DS-03e2dfb5-b531-443c-90c1-b53d6d85bb99 node DatanodeRegistration(127.0.0.1:42603, datanodeUuid=3eab685d-3b03-4ec1-ba04-2c0b82a3601d, infoPort=41907, infoSecurePort=0, ipcPort=34989, storageInfo=lv=-57;cid=testClusterID;nsid=235753840;c=1731457809380), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-13T00:30:29,155 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@6a3feff9{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/82264a5e-14d0-268b-aec5-074583ae0e1c/java.io.tmpdir/jetty-localhost-32835-hadoop-hdfs-3_4_1-tests_jar-_-any-11770312188120379131/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-13T00:30:29,156 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@708b22ef{HTTP/1.1, (http/1.1)}{localhost:32835} 2024-11-13T00:30:29,156 INFO [Time-limited test {}] server.Server(415): Started @175004ms 2024-11-13T00:30:29,158 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-13T00:30:29,248 WARN [Thread-1462 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-13T00:30:29,250 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x7c1b8fa47330e98f with lease ID 0xd633080cf6eef92e: from storage DS-5958543e-56a4-44a7-9bfc-b852d1a631b3 node DatanodeRegistration(127.0.0.1:37719, datanodeUuid=98a15cd0-243c-422e-9014-af5f2d458c5f, infoPort=36703, infoSecurePort=0, ipcPort=35231, storageInfo=lv=-57;cid=testClusterID;nsid=235753840;c=1731457809380), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-13T00:30:29,251 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x7c1b8fa47330e98f with lease ID 0xd633080cf6eef92e: from storage DS-8f22ec6a-74aa-4ab5-8936-386e88cfa4aa node DatanodeRegistration(127.0.0.1:37719, datanodeUuid=98a15cd0-243c-422e-9014-af5f2d458c5f, infoPort=36703, infoSecurePort=0, ipcPort=35231, storageInfo=lv=-57;cid=testClusterID;nsid=235753840;c=1731457809380), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-13T00:30:29,681 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/MasterData/WALs/cb28580903fb,32917,1731457759225/cb28580903fb%2C32917%2C1731457759225.1731457759520 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-13T00:30:29,945 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,35015,1731457760593/cb28580903fb%2C35015%2C1731457760593.1731457760827 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:30:29,947 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,41233,1731457759323/cb28580903fb%2C41233%2C1731457759323.meta.1731457760430.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:30:30,182 INFO [Time-limited test {}] wal.TestLogRolling(389): Data Nodes restarted 2024-11-13T00:30:30,184 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1004 2024-11-13T00:30:30,185 ERROR [FSHLog-0-hdfs://localhost:43143/user/jenkins/test-data/68f23f88-b9bc-298c-0df7-cd8f2616a41b-prefix:cb28580903fb,42117,1731457810344 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42125,DS-5958543e-56a4-44a7-9bfc-b852d1a631b3,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T00:30:30,185 WARN [FSHLog-0-hdfs://localhost:43143/user/jenkins/test-data/68f23f88-b9bc-298c-0df7-cd8f2616a41b-prefix:cb28580903fb,42117,1731457810344 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42125,DS-5958543e-56a4-44a7-9bfc-b852d1a631b3,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-13T00:30:30,185 DEBUG [regionserver/cb28580903fb:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog cb28580903fb%2C42117%2C1731457810344:(num 1731457824843) roll requested 2024-11-13T00:30:30,185 INFO [regionserver/cb28580903fb:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor cb28580903fb%2C42117%2C1731457810344.1731457830185 2024-11-13T00:30:30,190 DEBUG [regionserver/cb28580903fb:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:43143/user/jenkins/test-data/68f23f88-b9bc-298c-0df7-cd8f2616a41b/WALs/cb28580903fb,42117,1731457810344/cb28580903fb%2C42117%2C1731457810344.1731457824843 newFile=hdfs://localhost:43143/user/jenkins/test-data/68f23f88-b9bc-298c-0df7-cd8f2616a41b/WALs/cb28580903fb,42117,1731457810344/cb28580903fb%2C42117%2C1731457810344.1731457830185 2024-11-13T00:30:30,190 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:30:30,191 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:30:30,191 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:30:30,191 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:30:30,191 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:30:30,191 INFO [regionserver/cb28580903fb:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/68f23f88-b9bc-298c-0df7-cd8f2616a41b/WALs/cb28580903fb,42117,1731457810344/cb28580903fb%2C42117%2C1731457810344.1731457824843 with entries=2, filesize=2.37 KB; new WAL /user/jenkins/test-data/68f23f88-b9bc-298c-0df7-cd8f2616a41b/WALs/cb28580903fb,42117,1731457810344/cb28580903fb%2C42117%2C1731457810344.1731457830185 2024-11-13T00:30:30,191 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42125,DS-5958543e-56a4-44a7-9bfc-b852d1a631b3,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T00:30:30,191 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42125,DS-5958543e-56a4-44a7-9bfc-b852d1a631b3,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
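Editor's note: the roll sequence above is the normal reaction to the append failure: the log roller opens cb28580903fb%2C42117%2C1731457810344.1731457830185, closing the old writer fails (its pipeline datanodes are gone, hence "Failed to write trailer, non-fatal" and "close old writer failed"), and the entries that follow hand the unclosed old file to lease recovery. For orientation only, a WAL roll can also be requested externally through the public Admin API; this is a hedged sketch and an assumption about usage, since the test in this log drives the roll via the region server's internal log roller, not via Admin.

// Illustrative: requesting a WAL roll on a region server through the public Admin API.
// Assumptions: configuration and the server name below are placeholders taken from this log.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public final class RollWalSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Server name format is host,port,startcode, e.g. the instance seen in this log.
      ServerName rs = ServerName.valueOf("cb28580903fb,42117,1731457810344");
      admin.rollWALWriter(rs);  // asks that region server to close its current WAL and open a new one
    }
  }
}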
2024-11-13T00:30:30,191 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:43143/user/jenkins/test-data/68f23f88-b9bc-298c-0df7-cd8f2616a41b/WALs/cb28580903fb,42117,1731457810344/cb28580903fb%2C42117%2C1731457810344.1731457824843 2024-11-13T00:30:30,192 WARN [IPC Server handler 2 on default port 43143 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/68f23f88-b9bc-298c-0df7-cd8f2616a41b/WALs/cb28580903fb,42117,1731457810344/cb28580903fb%2C42117%2C1731457810344.1731457824843 has not been closed. Lease recovery is in progress. RecoveryId = 1020 for block blk_1073741837_1018 2024-11-13T00:30:30,192 DEBUG [regionserver/cb28580903fb:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41907:41907),(127.0.0.1/127.0.0.1:36703:36703)] 2024-11-13T00:30:30,192 DEBUG [regionserver/cb28580903fb:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:43143/user/jenkins/test-data/68f23f88-b9bc-298c-0df7-cd8f2616a41b/WALs/cb28580903fb,42117,1731457810344/cb28580903fb%2C42117%2C1731457810344.1731457824843 is not closed yet, will try archiving it next time 2024-11-13T00:30:30,192 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:43143/user/jenkins/test-data/68f23f88-b9bc-298c-0df7-cd8f2616a41b/WALs/cb28580903fb,42117,1731457810344/cb28580903fb%2C42117%2C1731457810344.1731457824843 after 1ms 2024-11-13T00:30:30,681 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/MasterData/WALs/cb28580903fb,32917,1731457759225/cb28580903fb%2C32917%2C1731457759225.1731457759520 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:30:30,945 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,35015,1731457760593/cb28580903fb%2C35015%2C1731457760593.1731457760827 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:30:30,947 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,41233,1731457759323/cb28580903fb%2C41233%2C1731457759323.meta.1731457760430.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:30:31,682 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/MasterData/WALs/cb28580903fb,32917,1731457759225/cb28580903fb%2C32917%2C1731457759225.1731457759520 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:30:31,946 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,35015,1731457760593/cb28580903fb%2C35015%2C1731457760593.1731457760827 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:30:31,948 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,41233,1731457759323/cb28580903fb%2C41233%2C1731457759323.meta.1731457760430.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:30:32,193 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor cb28580903fb%2C42117%2C1731457810344.1731457832193 2024-11-13T00:30:32,199 DEBUG [Time-limited test {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:43143/user/jenkins/test-data/68f23f88-b9bc-298c-0df7-cd8f2616a41b/WALs/cb28580903fb,42117,1731457810344/cb28580903fb%2C42117%2C1731457810344.1731457830185 newFile=hdfs://localhost:43143/user/jenkins/test-data/68f23f88-b9bc-298c-0df7-cd8f2616a41b/WALs/cb28580903fb,42117,1731457810344/cb28580903fb%2C42117%2C1731457810344.1731457832193 2024-11-13T00:30:32,199 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:30:32,199 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:30:32,199 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:30:32,199 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:30:32,199 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:30:32,199 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/68f23f88-b9bc-298c-0df7-cd8f2616a41b/WALs/cb28580903fb,42117,1731457810344/cb28580903fb%2C42117%2C1731457810344.1731457830185 with entries=1, filesize=1.23 KB; new WAL /user/jenkins/test-data/68f23f88-b9bc-298c-0df7-cd8f2616a41b/WALs/cb28580903fb,42117,1731457810344/cb28580903fb%2C42117%2C1731457810344.1731457832193 2024-11-13T00:30:32,201 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37719 is added to blk_1073741838_1019 (size=1264) 2024-11-13T00:30:32,201 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42603 is added to blk_1073741838_1019 (size=1264) 2024-11-13T00:30:32,202 DEBUG [Time-limited test 
{}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41907:41907),(127.0.0.1/127.0.0.1:36703:36703)] 2024-11-13T00:30:32,202 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:43143/user/jenkins/test-data/68f23f88-b9bc-298c-0df7-cd8f2616a41b/WALs/cb28580903fb,42117,1731457810344/cb28580903fb%2C42117%2C1731457810344.1731457824843 is not closed yet, will try archiving it next time 2024-11-13T00:30:32,202 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:43143/user/jenkins/test-data/68f23f88-b9bc-298c-0df7-cd8f2616a41b/WALs/cb28580903fb,42117,1731457810344/cb28580903fb%2C42117%2C1731457810344.1731457824843 is not closed yet, will try archiving it next time 2024-11-13T00:30:32,202 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:43143/user/jenkins/test-data/68f23f88-b9bc-298c-0df7-cd8f2616a41b/WALs/cb28580903fb,42117,1731457810344/cb28580903fb%2C42117%2C1731457810344.1731457810865 2024-11-13T00:30:32,202 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:43143/user/jenkins/test-data/68f23f88-b9bc-298c-0df7-cd8f2616a41b/WALs/cb28580903fb,42117,1731457810344/cb28580903fb%2C42117%2C1731457810344.1731457810865 2024-11-13T00:30:32,203 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:43143/user/jenkins/test-data/68f23f88-b9bc-298c-0df7-cd8f2616a41b/WALs/cb28580903fb,42117,1731457810344/cb28580903fb%2C42117%2C1731457810344.1731457810865 after 1ms 2024-11-13T00:30:32,203 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/68f23f88-b9bc-298c-0df7-cd8f2616a41b/WALs/cb28580903fb,42117,1731457810344/cb28580903fb%2C42117%2C1731457810344.1731457810865 2024-11-13T00:30:32,212 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #3: [\x00/METAFAMILY:HBASE::REGION_EVENT::REGION_OPEN/1731457811761/Put/vlen=218/seqid=0] 2024-11-13T00:30:32,212 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #4: [row1002/info:/1731457821462/Put/vlen=1045/seqid=0] 2024-11-13T00:30:32,212 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/68f23f88-b9bc-298c-0df7-cd8f2616a41b/WALs/cb28580903fb,42117,1731457810344/cb28580903fb%2C42117%2C1731457810344.1731457810865 2024-11-13T00:30:32,212 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:43143/user/jenkins/test-data/68f23f88-b9bc-298c-0df7-cd8f2616a41b/WALs/cb28580903fb,42117,1731457810344/cb28580903fb%2C42117%2C1731457810344.1731457824843 2024-11-13T00:30:32,212 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:43143/user/jenkins/test-data/68f23f88-b9bc-298c-0df7-cd8f2616a41b/WALs/cb28580903fb,42117,1731457810344/cb28580903fb%2C42117%2C1731457810344.1731457824843 2024-11-13T00:30:32,212 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:43143/user/jenkins/test-data/68f23f88-b9bc-298c-0df7-cd8f2616a41b/WALs/cb28580903fb,42117,1731457810344/cb28580903fb%2C42117%2C1731457810344.1731457824843 after 0ms 2024-11-13T00:30:32,212 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/68f23f88-b9bc-298c-0df7-cd8f2616a41b/WALs/cb28580903fb,42117,1731457810344/cb28580903fb%2C42117%2C1731457810344.1731457824843 2024-11-13T00:30:32,215 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #5: 
[row1003/info:/1731457824842/Put/vlen=1045/seqid=0] 2024-11-13T00:30:32,216 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #6: [row1004/info:/1731457826855/Put/vlen=1045/seqid=0] 2024-11-13T00:30:32,216 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/68f23f88-b9bc-298c-0df7-cd8f2616a41b/WALs/cb28580903fb,42117,1731457810344/cb28580903fb%2C42117%2C1731457810344.1731457824843 2024-11-13T00:30:32,216 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:43143/user/jenkins/test-data/68f23f88-b9bc-298c-0df7-cd8f2616a41b/WALs/cb28580903fb,42117,1731457810344/cb28580903fb%2C42117%2C1731457810344.1731457830185 2024-11-13T00:30:32,216 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:43143/user/jenkins/test-data/68f23f88-b9bc-298c-0df7-cd8f2616a41b/WALs/cb28580903fb,42117,1731457810344/cb28580903fb%2C42117%2C1731457810344.1731457830185 2024-11-13T00:30:32,216 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:43143/user/jenkins/test-data/68f23f88-b9bc-298c-0df7-cd8f2616a41b/WALs/cb28580903fb,42117,1731457810344/cb28580903fb%2C42117%2C1731457810344.1731457830185 after 0ms 2024-11-13T00:30:32,216 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/68f23f88-b9bc-298c-0df7-cd8f2616a41b/WALs/cb28580903fb,42117,1731457810344/cb28580903fb%2C42117%2C1731457810344.1731457830185 2024-11-13T00:30:32,219 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #7: [row1005/info:/1731457830185/Put/vlen=1045/seqid=0] 2024-11-13T00:30:32,219 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:43143/user/jenkins/test-data/68f23f88-b9bc-298c-0df7-cd8f2616a41b/WALs/cb28580903fb,42117,1731457810344/cb28580903fb%2C42117%2C1731457810344.1731457832193 2024-11-13T00:30:32,219 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:43143/user/jenkins/test-data/68f23f88-b9bc-298c-0df7-cd8f2616a41b/WALs/cb28580903fb,42117,1731457810344/cb28580903fb%2C42117%2C1731457810344.1731457832193 2024-11-13T00:30:32,219 WARN [IPC Server handler 4 on default port 43143 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/68f23f88-b9bc-298c-0df7-cd8f2616a41b/WALs/cb28580903fb,42117,1731457810344/cb28580903fb%2C42117%2C1731457810344.1731457832193 has not been closed. Lease recovery is in progress. RecoveryId = 1022 for block blk_1073741839_1021 2024-11-13T00:30:32,220 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:43143/user/jenkins/test-data/68f23f88-b9bc-298c-0df7-cd8f2616a41b/WALs/cb28580903fb,42117,1731457810344/cb28580903fb%2C42117%2C1731457810344.1731457832193 after 1ms 2024-11-13T00:30:32,683 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/MasterData/WALs/cb28580903fb,32917,1731457759225/cb28580903fb%2C32917%2C1731457759225.1731457759520 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:30:32,947 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,35015,1731457760593/cb28580903fb%2C35015%2C1731457760593.1731457760827 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:30:32,948 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,41233,1731457759323/cb28580903fb%2C41233%2C1731457759323.meta.1731457760430.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:30:33,092 WARN [ResponseProcessor for block BP-472515239-172.17.0.2-1731457809380:blk_1073741839_1021 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-472515239-172.17.0.2-1731457809380:blk_1073741839_1021 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-13T00:30:33,092 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_787416943_22 at /127.0.0.1:56990 [Receiving block BP-472515239-172.17.0.2-1731457809380:blk_1073741839_1021] {}] datanode.DataXceiver(331): 127.0.0.1:42603:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:56990 dst: /127.0.0.1:42603 java.io.InterruptedIOException: Interrupted while waiting for IO on channel java.nio.channels.SocketChannel[connected local=localhost/127.0.0.1:42603 remote=/127.0.0.1:56990]. Total timeout mills is 60000, 59106 millis timeout left. at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:350) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T00:30:33,092 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_787416943_22 at /127.0.0.1:48398 [Receiving block BP-472515239-172.17.0.2-1731457809380:blk_1073741839_1021] {}] datanode.DataXceiver(331): 127.0.0.1:37719:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:48398 dst: /127.0.0.1:37719 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T00:30:33,093 WARN [DataStreamer for file /user/jenkins/test-data/68f23f88-b9bc-298c-0df7-cd8f2616a41b/WALs/cb28580903fb,42117,1731457810344/cb28580903fb%2C42117%2C1731457810344.1731457832193 block BP-472515239-172.17.0.2-1731457809380:blk_1073741839_1021 {}] hdfs.DataStreamer(1731): Error Recovery for BP-472515239-172.17.0.2-1731457809380:blk_1073741839_1021 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42603,DS-90d4e24e-d0f9-4947-abd1-b96dac04a6f2,DISK], DatanodeInfoWithStorage[127.0.0.1:37719,DS-5958543e-56a4-44a7-9bfc-b852d1a631b3,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:42603,DS-90d4e24e-d0f9-4947-abd1-b96dac04a6f2,DISK]) is bad. 2024-11-13T00:30:33,097 WARN [DataStreamer for file /user/jenkins/test-data/68f23f88-b9bc-298c-0df7-cd8f2616a41b/WALs/cb28580903fb,42117,1731457810344/cb28580903fb%2C42117%2C1731457810344.1731457832193 block BP-472515239-172.17.0.2-1731457809380:blk_1073741839_1021 {}] hdfs.DataStreamer(859): DataStreamer Exception org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-472515239-172.17.0.2-1731457809380:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor198.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T00:30:33,098 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42603 is added to blk_1073741839_1022 (size=85) 2024-11-13T00:30:33,100 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37719 is added to blk_1073741839_1022 (size=85) 2024-11-13T00:30:33,683 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/MasterData/WALs/cb28580903fb,32917,1731457759225/cb28580903fb%2C32917%2C1731457759225.1731457759520 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:30:33,947 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,35015,1731457760593/cb28580903fb%2C35015%2C1731457760593.1731457760827 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:30:33,949 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,41233,1731457759323/cb28580903fb%2C41233%2C1731457759323.meta.1731457760430.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:30:34,091 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741837_1018: GenerationStamp not matched, existing replica is blk_1073741837_1016 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 
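Elsewhere in this run the same pattern succeeds: a first attempt fails while the NameNode reports "Lease recovery is in progress", and a later attempt finds the file closed (for example "Recovered lease, attempt=1 ... after 4002ms" just below). A simplified sketch of that retry loop using the public DistributedFileSystem API; the one-second pause and the attempt bound are illustrative assumptions, not the utility's exact policy:

// Illustrative retry loop for lease recovery (pause length and bound are assumptions).
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public final class RecoverLeaseLoop {
  static boolean recoverWithRetries(DistributedFileSystem dfs, Path p, int maxAttempts)
      throws Exception {
    for (int attempt = 0; attempt < maxAttempts; attempt++) {
      // recoverLease() returns true once the NameNode considers the file closed.
      if (dfs.recoverLease(p)) {
        return true;
      }
      // Otherwise recovery is still in progress (the NameNode logged
      // "Lease recovery is in progress. RecoveryId = ..."); wait and re-check.
      Thread.sleep(1000L);
      if (dfs.isFileClosed(p)) {
        return true;
      }
    }
    return false;
  }
}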
2024-11-13T00:30:34,193 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:43143/user/jenkins/test-data/68f23f88-b9bc-298c-0df7-cd8f2616a41b/WALs/cb28580903fb,42117,1731457810344/cb28580903fb%2C42117%2C1731457810344.1731457824843 after 4002ms 2024-11-13T00:30:34,684 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/MasterData/WALs/cb28580903fb,32917,1731457759225/cb28580903fb%2C32917%2C1731457759225.1731457759520 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:30:34,948 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,35015,1731457760593/cb28580903fb%2C35015%2C1731457760593.1731457760827 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:30:34,949 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,41233,1731457759323/cb28580903fb%2C41233%2C1731457759323.meta.1731457760430.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:30:35,684 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/MasterData/WALs/cb28580903fb,32917,1731457759225/cb28580903fb%2C32917%2C1731457759225.1731457759520 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:30:35,948 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,35015,1731457760593/cb28580903fb%2C35015%2C1731457760593.1731457760827 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:30:35,949 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,41233,1731457759323/cb28580903fb%2C41233%2C1731457759323.meta.1731457760430.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:30:36,221 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:43143/user/jenkins/test-data/68f23f88-b9bc-298c-0df7-cd8f2616a41b/WALs/cb28580903fb,42117,1731457810344/cb28580903fb%2C42117%2C1731457810344.1731457832193 after 4001ms 2024-11-13T00:30:36,221 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/68f23f88-b9bc-298c-0df7-cd8f2616a41b/WALs/cb28580903fb,42117,1731457810344/cb28580903fb%2C42117%2C1731457810344.1731457832193 2024-11-13T00:30:36,224 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/68f23f88-b9bc-298c-0df7-cd8f2616a41b/WALs/cb28580903fb,42117,1731457810344/cb28580903fb%2C42117%2C1731457810344.1731457832193 2024-11-13T00:30:36,225 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.74 KB heapSize=3.77 KB 2024-11-13T00:30:36,225 ERROR [FSHLog-0-hdfs://localhost:43143/user/jenkins/test-data/68f23f88-b9bc-298c-0df7-cd8f2616a41b-prefix:cb28580903fb,42117,1731457810344.meta {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36659,DS-90d4e24e-d0f9-4947-abd1-b96dac04a6f2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T00:30:36,225 WARN [FSHLog-0-hdfs://localhost:43143/user/jenkins/test-data/68f23f88-b9bc-298c-0df7-cd8f2616a41b-prefix:cb28580903fb,42117,1731457810344.meta {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36659,DS-90d4e24e-d0f9-4947-abd1-b96dac04a6f2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-13T00:30:36,225 DEBUG [regionserver/cb28580903fb:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog cb28580903fb%2C42117%2C1731457810344.meta:.meta(num 1731457811212) roll requested 2024-11-13T00:30:36,226 INFO [regionserver/cb28580903fb:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor cb28580903fb%2C42117%2C1731457810344.meta.1731457836225.meta 2024-11-13T00:30:36,230 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:30:36,230 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:30:36,230 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:30:36,230 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:30:36,230 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:30:36,231 INFO [regionserver/cb28580903fb:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/68f23f88-b9bc-298c-0df7-cd8f2616a41b/WALs/cb28580903fb,42117,1731457810344/cb28580903fb%2C42117%2C1731457810344.meta.1731457811212.meta with entries=8, filesize=2.36 KB; new WAL /user/jenkins/test-data/68f23f88-b9bc-298c-0df7-cd8f2616a41b/WALs/cb28580903fb,42117,1731457810344/cb28580903fb%2C42117%2C1731457810344.meta.1731457836225.meta 2024-11-13T00:30:36,231 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36659,DS-90d4e24e-d0f9-4947-abd1-b96dac04a6f2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T00:30:36,231 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36659,DS-90d4e24e-d0f9-4947-abd1-b96dac04a6f2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-13T00:30:36,231 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:43143/user/jenkins/test-data/68f23f88-b9bc-298c-0df7-cd8f2616a41b/WALs/cb28580903fb,42117,1731457810344/cb28580903fb%2C42117%2C1731457810344.meta.1731457811212.meta 2024-11-13T00:30:36,231 DEBUG [regionserver/cb28580903fb:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36703:36703),(127.0.0.1/127.0.0.1:41907:41907)] 2024-11-13T00:30:36,231 DEBUG [regionserver/cb28580903fb:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:43143/user/jenkins/test-data/68f23f88-b9bc-298c-0df7-cd8f2616a41b/WALs/cb28580903fb,42117,1731457810344/cb28580903fb%2C42117%2C1731457810344.meta.1731457811212.meta is not closed yet, will try archiving it next time 2024-11-13T00:30:36,231 WARN [IPC Server handler 1 on default port 43143 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/68f23f88-b9bc-298c-0df7-cd8f2616a41b/WALs/cb28580903fb,42117,1731457810344/cb28580903fb%2C42117%2C1731457810344.meta.1731457811212.meta has not been closed. Lease recovery is in progress. RecoveryId = 1024 for block blk_1073741834_1015 2024-11-13T00:30:36,232 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:43143/user/jenkins/test-data/68f23f88-b9bc-298c-0df7-cd8f2616a41b/WALs/cb28580903fb,42117,1731457810344/cb28580903fb%2C42117%2C1731457810344.meta.1731457811212.meta after 1ms 2024-11-13T00:30:36,247 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43143/user/jenkins/test-data/68f23f88-b9bc-298c-0df7-cd8f2616a41b/data/hbase/meta/1588230740/.tmp/info/9e63ce9f573d4b73a4a9175e6f94c752 is 207, key is TestLogRolling-testLogRollOnPipelineRestart,,1731457811395.68dec2c8fdcb5fec0c289698d8880a8a./info:regioninfo/1731457811765/Put/seqid=0 2024-11-13T00:30:36,252 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42603 is added to blk_1073741841_1025 (size=7125) 2024-11-13T00:30:36,252 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37719 is added to blk_1073741841_1025 (size=7125) 2024-11-13T00:30:36,252 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.52 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:43143/user/jenkins/test-data/68f23f88-b9bc-298c-0df7-cd8f2616a41b/data/hbase/meta/1588230740/.tmp/info/9e63ce9f573d4b73a4a9175e6f94c752 2024-11-13T00:30:36,272 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43143/user/jenkins/test-data/68f23f88-b9bc-298c-0df7-cd8f2616a41b/data/hbase/meta/1588230740/.tmp/ns/b12545762600494bba30441cdd436483 is 43, key is default/ns:d/1731457811327/Put/seqid=0 2024-11-13T00:30:36,276 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37719 is added to blk_1073741842_1026 (size=5153) 2024-11-13T00:30:36,277 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42603 is added to blk_1073741842_1026 (size=5153) 2024-11-13T00:30:36,277 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), 
to=hdfs://localhost:43143/user/jenkins/test-data/68f23f88-b9bc-298c-0df7-cd8f2616a41b/data/hbase/meta/1588230740/.tmp/ns/b12545762600494bba30441cdd436483 2024-11-13T00:30:36,296 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43143/user/jenkins/test-data/68f23f88-b9bc-298c-0df7-cd8f2616a41b/data/hbase/meta/1588230740/.tmp/table/05d3bf69b6974dea9f720abff9b87eef is 79, key is TestLogRolling-testLogRollOnPipelineRestart/table:state/1731457811776/Put/seqid=0 2024-11-13T00:30:36,301 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37719 is added to blk_1073741843_1027 (size=5438) 2024-11-13T00:30:36,301 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42603 is added to blk_1073741843_1027 (size=5438) 2024-11-13T00:30:36,301 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=150 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:43143/user/jenkins/test-data/68f23f88-b9bc-298c-0df7-cd8f2616a41b/data/hbase/meta/1588230740/.tmp/table/05d3bf69b6974dea9f720abff9b87eef 2024-11-13T00:30:36,307 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43143/user/jenkins/test-data/68f23f88-b9bc-298c-0df7-cd8f2616a41b/data/hbase/meta/1588230740/.tmp/info/9e63ce9f573d4b73a4a9175e6f94c752 as hdfs://localhost:43143/user/jenkins/test-data/68f23f88-b9bc-298c-0df7-cd8f2616a41b/data/hbase/meta/1588230740/info/9e63ce9f573d4b73a4a9175e6f94c752 2024-11-13T00:30:36,312 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43143/user/jenkins/test-data/68f23f88-b9bc-298c-0df7-cd8f2616a41b/data/hbase/meta/1588230740/info/9e63ce9f573d4b73a4a9175e6f94c752, entries=10, sequenceid=11, filesize=7.0 K 2024-11-13T00:30:36,313 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43143/user/jenkins/test-data/68f23f88-b9bc-298c-0df7-cd8f2616a41b/data/hbase/meta/1588230740/.tmp/ns/b12545762600494bba30441cdd436483 as hdfs://localhost:43143/user/jenkins/test-data/68f23f88-b9bc-298c-0df7-cd8f2616a41b/data/hbase/meta/1588230740/ns/b12545762600494bba30441cdd436483 2024-11-13T00:30:36,317 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43143/user/jenkins/test-data/68f23f88-b9bc-298c-0df7-cd8f2616a41b/data/hbase/meta/1588230740/ns/b12545762600494bba30441cdd436483, entries=2, sequenceid=11, filesize=5.0 K 2024-11-13T00:30:36,318 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43143/user/jenkins/test-data/68f23f88-b9bc-298c-0df7-cd8f2616a41b/data/hbase/meta/1588230740/.tmp/table/05d3bf69b6974dea9f720abff9b87eef as hdfs://localhost:43143/user/jenkins/test-data/68f23f88-b9bc-298c-0df7-cd8f2616a41b/data/hbase/meta/1588230740/table/05d3bf69b6974dea9f720abff9b87eef 2024-11-13T00:30:36,323 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43143/user/jenkins/test-data/68f23f88-b9bc-298c-0df7-cd8f2616a41b/data/hbase/meta/1588230740/table/05d3bf69b6974dea9f720abff9b87eef, entries=2, sequenceid=11, filesize=5.3 K 2024-11-13T00:30:36,324 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~1.74 KB/1782, heapSize ~3.48 KB/3560, currentSize=0 B/0 for 1588230740 in 100ms, sequenceid=11, compaction requested=false 2024-11-13T00:30:36,324 DEBUG [Time-limited test {}] 
regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-11-13T00:30:36,324 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 68dec2c8fdcb5fec0c289698d8880a8a 1/1 column families, dataSize=4.20 KB heapSize=4.75 KB 2024-11-13T00:30:36,325 ERROR [FSHLog-0-hdfs://localhost:43143/user/jenkins/test-data/68f23f88-b9bc-298c-0df7-cd8f2616a41b-prefix:cb28580903fb,42117,1731457810344 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-472515239-172.17.0.2-1731457809380:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor198.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T00:30:36,325 WARN [FSHLog-0-hdfs://localhost:43143/user/jenkins/test-data/68f23f88-b9bc-298c-0df7-cd8f2616a41b-prefix:cb28580903fb,42117,1731457810344 {}] wal.AbstractFSWAL(2174): append entry failed org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-472515239-172.17.0.2-1731457809380:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor198.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-13T00:30:36,325 DEBUG [regionserver/cb28580903fb:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog cb28580903fb%2C42117%2C1731457810344:(num 1731457832193) roll requested 2024-11-13T00:30:36,326 INFO [regionserver/cb28580903fb:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor cb28580903fb%2C42117%2C1731457810344.1731457836326 2024-11-13T00:30:36,330 DEBUG [regionserver/cb28580903fb:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:43143/user/jenkins/test-data/68f23f88-b9bc-298c-0df7-cd8f2616a41b/WALs/cb28580903fb,42117,1731457810344/cb28580903fb%2C42117%2C1731457810344.1731457832193 newFile=hdfs://localhost:43143/user/jenkins/test-data/68f23f88-b9bc-298c-0df7-cd8f2616a41b/WALs/cb28580903fb,42117,1731457810344/cb28580903fb%2C42117%2C1731457810344.1731457836326 2024-11-13T00:30:36,330 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:30:36,331 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:30:36,331 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:30:36,331 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:30:36,331 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:30:36,331 INFO [regionserver/cb28580903fb:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/68f23f88-b9bc-298c-0df7-cd8f2616a41b/WALs/cb28580903fb,42117,1731457810344/cb28580903fb%2C42117%2C1731457810344.1731457832193 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/68f23f88-b9bc-298c-0df7-cd8f2616a41b/WALs/cb28580903fb,42117,1731457810344/cb28580903fb%2C42117%2C1731457810344.1731457836326 2024-11-13T00:30:36,331 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-472515239-172.17.0.2-1731457809380:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor198.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] 
at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T00:30:36,332 DEBUG [regionserver/cb28580903fb:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36703:36703),(127.0.0.1/127.0.0.1:41907:41907)] 2024-11-13T00:30:36,332 DEBUG [regionserver/cb28580903fb:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:43143/user/jenkins/test-data/68f23f88-b9bc-298c-0df7-cd8f2616a41b/WALs/cb28580903fb,42117,1731457810344/cb28580903fb%2C42117%2C1731457810344.1731457832193 is not closed yet, will try archiving it next time 2024-11-13T00:30:36,332 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. 
org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-472515239-172.17.0.2-1731457809380:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor198.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] 
at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor197.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-13T00:30:36,332 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:43143/user/jenkins/test-data/68f23f88-b9bc-298c-0df7-cd8f2616a41b/WALs/cb28580903fb,42117,1731457810344/cb28580903fb%2C42117%2C1731457810344.1731457832193 2024-11-13T00:30:36,332 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:43143/user/jenkins/test-data/68f23f88-b9bc-298c-0df7-cd8f2616a41b/WALs/cb28580903fb,42117,1731457810344/cb28580903fb%2C42117%2C1731457810344.1731457832193 after 0ms 2024-11-13T00:30:36,333 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:43143/user/jenkins/test-data/68f23f88-b9bc-298c-0df7-cd8f2616a41b/WALs/cb28580903fb,42117,1731457810344/cb28580903fb%2C42117%2C1731457810344.1731457832193 to hdfs://localhost:43143/user/jenkins/test-data/68f23f88-b9bc-298c-0df7-cd8f2616a41b/oldWALs/cb28580903fb%2C42117%2C1731457810344.1731457832193 2024-11-13T00:30:36,347 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43143/user/jenkins/test-data/68f23f88-b9bc-298c-0df7-cd8f2616a41b/data/default/TestLogRolling-testLogRollOnPipelineRestart/68dec2c8fdcb5fec0c289698d8880a8a/.tmp/info/cfbf8f380a324ed3bf1ef4a6513cb757 is 1080, key is row1002/info:/1731457821462/Put/seqid=0 2024-11-13T00:30:36,351 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37719 is added to blk_1073741845_1029 (size=9270) 2024-11-13T00:30:36,352 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42603 is added to blk_1073741845_1029 (size=9270) 2024-11-13T00:30:36,352 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.20 KB at sequenceid=8 (bloomFilter=true), to=hdfs://localhost:43143/user/jenkins/test-data/68f23f88-b9bc-298c-0df7-cd8f2616a41b/data/default/TestLogRolling-testLogRollOnPipelineRestart/68dec2c8fdcb5fec0c289698d8880a8a/.tmp/info/cfbf8f380a324ed3bf1ef4a6513cb757 2024-11-13T00:30:36,358 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43143/user/jenkins/test-data/68f23f88-b9bc-298c-0df7-cd8f2616a41b/data/default/TestLogRolling-testLogRollOnPipelineRestart/68dec2c8fdcb5fec0c289698d8880a8a/.tmp/info/cfbf8f380a324ed3bf1ef4a6513cb757 as hdfs://localhost:43143/user/jenkins/test-data/68f23f88-b9bc-298c-0df7-cd8f2616a41b/data/default/TestLogRolling-testLogRollOnPipelineRestart/68dec2c8fdcb5fec0c289698d8880a8a/info/cfbf8f380a324ed3bf1ef4a6513cb757 2024-11-13T00:30:36,363 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43143/user/jenkins/test-data/68f23f88-b9bc-298c-0df7-cd8f2616a41b/data/default/TestLogRolling-testLogRollOnPipelineRestart/68dec2c8fdcb5fec0c289698d8880a8a/info/cfbf8f380a324ed3bf1ef4a6513cb757, entries=4, sequenceid=8, filesize=9.1 K 2024-11-13T00:30:36,364 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~4.20 KB/4304, heapSize ~4.73 KB/4848, currentSize=0 B/0 for 68dec2c8fdcb5fec0c289698d8880a8a in 40ms, sequenceid=8, compaction requested=false 2024-11-13T00:30:36,364 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 68dec2c8fdcb5fec0c289698d8880a8a: 2024-11-13T00:30:36,370 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-13T00:30:36,370 INFO [Time-limited test {}] 
client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-13T00:30:36,370 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-13T00:30:36,370 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T00:30:36,370 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T00:30:36,370 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-13T00:30:36,371 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-13T00:30:36,371 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=958659110, stopped=false 2024-11-13T00:30:36,371 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=cb28580903fb,46601,1731457810282 2024-11-13T00:30:36,374 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46601-0x1010ebb69e80000, quorum=127.0.0.1:58313, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-13T00:30:36,374 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42117-0x1010ebb69e80001, quorum=127.0.0.1:58313, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-13T00:30:36,374 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46601-0x1010ebb69e80000, quorum=127.0.0.1:58313, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T00:30:36,374 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42117-0x1010ebb69e80001, quorum=127.0.0.1:58313, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T00:30:36,374 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-13T00:30:36,374 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-13T00:30:36,374 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at 
org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-13T00:30:36,375 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T00:30:36,375 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'cb28580903fb,42117,1731457810344' ***** 2024-11-13T00:30:36,375 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-13T00:30:36,375 INFO [RS:0;cb28580903fb:42117 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-13T00:30:36,375 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-13T00:30:36,375 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:42117-0x1010ebb69e80001, quorum=127.0.0.1:58313, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-13T00:30:36,375 INFO [RS:0;cb28580903fb:42117 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-13T00:30:36,375 INFO [RS:0;cb28580903fb:42117 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-13T00:30:36,375 INFO [RS:0;cb28580903fb:42117 {}] regionserver.HRegionServer(3091): Received CLOSE for 68dec2c8fdcb5fec0c289698d8880a8a 2024-11-13T00:30:36,376 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:46601-0x1010ebb69e80000, quorum=127.0.0.1:58313, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-13T00:30:36,376 INFO [RS:0;cb28580903fb:42117 {}] regionserver.HRegionServer(959): stopping server cb28580903fb,42117,1731457810344 2024-11-13T00:30:36,376 INFO [RS:0;cb28580903fb:42117 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-13T00:30:36,376 INFO [RS:0;cb28580903fb:42117 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;cb28580903fb:42117. 
2024-11-13T00:30:36,376 DEBUG [RS_CLOSE_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 68dec2c8fdcb5fec0c289698d8880a8a, disabling compactions & flushes 2024-11-13T00:30:36,376 DEBUG [RS:0;cb28580903fb:42117 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-13T00:30:36,376 DEBUG [RS:0;cb28580903fb:42117 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T00:30:36,376 INFO [RS_CLOSE_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnPipelineRestart,,1731457811395.68dec2c8fdcb5fec0c289698d8880a8a. 2024-11-13T00:30:36,376 DEBUG [RS_CLOSE_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnPipelineRestart,,1731457811395.68dec2c8fdcb5fec0c289698d8880a8a. 2024-11-13T00:30:36,376 DEBUG [RS_CLOSE_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnPipelineRestart,,1731457811395.68dec2c8fdcb5fec0c289698d8880a8a. after waiting 0 ms 2024-11-13T00:30:36,376 DEBUG [RS_CLOSE_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnPipelineRestart,,1731457811395.68dec2c8fdcb5fec0c289698d8880a8a. 2024-11-13T00:30:36,376 INFO [RS:0;cb28580903fb:42117 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-13T00:30:36,376 INFO [RS:0;cb28580903fb:42117 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-13T00:30:36,376 INFO [RS:0;cb28580903fb:42117 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-11-13T00:30:36,376 INFO [RS:0;cb28580903fb:42117 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-13T00:30:36,379 INFO [RS:0;cb28580903fb:42117 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-13T00:30:36,379 DEBUG [RS:0;cb28580903fb:42117 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, 68dec2c8fdcb5fec0c289698d8880a8a=TestLogRolling-testLogRollOnPipelineRestart,,1731457811395.68dec2c8fdcb5fec0c289698d8880a8a.} 2024-11-13T00:30:36,379 DEBUG [RS:0;cb28580903fb:42117 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 68dec2c8fdcb5fec0c289698d8880a8a 2024-11-13T00:30:36,379 DEBUG [RS_CLOSE_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-13T00:30:36,379 INFO [RS_CLOSE_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-13T00:30:36,379 DEBUG [RS_CLOSE_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-13T00:30:36,379 DEBUG [RS_CLOSE_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-13T00:30:36,379 DEBUG [RS_CLOSE_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-13T00:30:36,384 DEBUG [RS_CLOSE_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43143/user/jenkins/test-data/68f23f88-b9bc-298c-0df7-cd8f2616a41b/data/default/TestLogRolling-testLogRollOnPipelineRestart/68dec2c8fdcb5fec0c289698d8880a8a/recovered.edits/11.seqid, newMaxSeqId=11, maxSeqId=1 2024-11-13T00:30:36,384 INFO [RS_CLOSE_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnPipelineRestart,,1731457811395.68dec2c8fdcb5fec0c289698d8880a8a. 2024-11-13T00:30:36,384 DEBUG [RS_CLOSE_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 68dec2c8fdcb5fec0c289698d8880a8a: Waiting for close lock at 1731457836376Running coprocessor pre-close hooks at 1731457836376Disabling compacts and flushes for region at 1731457836376Disabling writes for close at 1731457836376Writing region close event to WAL at 1731457836379 (+3 ms)Running coprocessor post-close hooks at 1731457836384 (+5 ms)Closed at 1731457836384 2024-11-13T00:30:36,385 DEBUG [RS_CLOSE_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRollOnPipelineRestart,,1731457811395.68dec2c8fdcb5fec0c289698d8880a8a. 
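The close sequence above ends with WALSplitUtil writing a `recovered.edits/11.seqid` marker whose file name matches the reported newMaxSeqId=11. Below is a minimal, hedged Java sketch of reading such a marker back with the standard Hadoop FileSystem API; the directory path is a placeholder, and the "file name encodes the sequence id" convention is assumed from the log line itself rather than taken from HBase source.

```java
// Hedged sketch: infer the highest flushed sequence id for a region from the
// "<seqid>.seqid" marker files under its recovered.edits directory, as suggested
// by the "Wrote file=.../recovered.edits/11.seqid, newMaxSeqId=11" entry above.
// The path is a placeholder; the naming convention is assumed from that log line.
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class MaxSeqIdSketch {
  public static long readMaxSeqId(FileSystem fs, Path recoveredEditsDir) throws IOException {
    long max = -1L;
    if (!fs.exists(recoveredEditsDir)) {
      return max;
    }
    for (FileStatus st : fs.listStatus(recoveredEditsDir)) {
      String name = st.getPath().getName();
      if (name.endsWith(".seqid")) {
        try {
          max = Math.max(max,
              Long.parseLong(name.substring(0, name.length() - ".seqid".length())));
        } catch (NumberFormatException ignored) {
          // not a seqid marker; skip
        }
      }
    }
    return max; // would be 11 for the region closed above
  }

  public static void main(String[] args) throws IOException {
    FileSystem fs = FileSystem.get(new Configuration());
    // Placeholder directory standing in for the region's recovered.edits path.
    System.out.println(readMaxSeqId(fs, new Path("/tmp/example-region/recovered.edits")));
  }
}
```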
2024-11-13T00:30:36,388 DEBUG [RS_CLOSE_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43143/user/jenkins/test-data/68f23f88-b9bc-298c-0df7-cd8f2616a41b/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-13T00:30:36,389 DEBUG [RS_CLOSE_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-13T00:30:36,389 INFO [RS_CLOSE_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-13T00:30:36,389 DEBUG [RS_CLOSE_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731457836379Running coprocessor pre-close hooks at 1731457836379Disabling compacts and flushes for region at 1731457836379Disabling writes for close at 1731457836379Writing region close event to WAL at 1731457836384 (+5 ms)Running coprocessor post-close hooks at 1731457836388 (+4 ms)Closed at 1731457836389 (+1 ms) 2024-11-13T00:30:36,389 DEBUG [RS_CLOSE_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-13T00:30:36,579 INFO [RS:0;cb28580903fb:42117 {}] regionserver.HRegionServer(976): stopping server cb28580903fb,42117,1731457810344; all regions closed. 2024-11-13T00:30:36,580 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:30:36,580 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:30:36,580 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:30:36,580 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:30:36,580 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:30:36,582 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42603 is added to blk_1073741840_1023 (size=825) 2024-11-13T00:30:36,583 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37719 is added to blk_1073741840_1023 (size=825) 2024-11-13T00:30:36,685 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/MasterData/WALs/cb28580903fb,32917,1731457759225/cb28580903fb%2C32917%2C1731457759225.1731457759520 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:30:36,724 INFO [regionserver/cb28580903fb:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-13T00:30:36,724 INFO [regionserver/cb28580903fb:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-13T00:30:36,725 INFO [regionserver/cb28580903fb:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-13T00:30:36,949 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,35015,1731457760593/cb28580903fb%2C35015%2C1731457760593.1731457760827 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:30:36,950 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,41233,1731457759323/cb28580903fb%2C41233%2C1731457759323.meta.1731457760430.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:30:37,685 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/MasterData/WALs/cb28580903fb,32917,1731457759225/cb28580903fb%2C32917%2C1731457759225.1731457759520 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:30:37,949 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,35015,1731457760593/cb28580903fb%2C35015%2C1731457760593.1731457760827 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:30:37,950 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,41233,1731457759323/cb28580903fb%2C41233%2C1731457759323.meta.1731457760430.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:30:38,686 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/MasterData/WALs/cb28580903fb,32917,1731457759225/cb28580903fb%2C32917%2C1731457759225.1731457759520 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:30:38,950 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,35015,1731457760593/cb28580903fb%2C35015%2C1731457760593.1731457760827 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:30:38,951 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,41233,1731457759323/cb28580903fb%2C41233%2C1731457759323.meta.1731457760430.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-13T00:30:39,509 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-13T00:30:39,509 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-13T00:30:39,510 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart 2024-11-13T00:30:39,686 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/MasterData/WALs/cb28580903fb,32917,1731457759225/cb28580903fb%2C32917%2C1731457759225.1731457759520 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:30:39,950 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,35015,1731457760593/cb28580903fb%2C35015%2C1731457760593.1731457760827 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:30:39,951 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,41233,1731457759323/cb28580903fb%2C41233%2C1731457759323.meta.1731457760430.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:30:40,232 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:43143/user/jenkins/test-data/68f23f88-b9bc-298c-0df7-cd8f2616a41b/WALs/cb28580903fb,42117,1731457810344/cb28580903fb%2C42117%2C1731457810344.meta.1731457811212.meta after 4001ms 2024-11-13T00:30:40,233 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:43143/user/jenkins/test-data/68f23f88-b9bc-298c-0df7-cd8f2616a41b/WALs/cb28580903fb,42117,1731457810344/cb28580903fb%2C42117%2C1731457810344.meta.1731457811212.meta to hdfs://localhost:43143/user/jenkins/test-data/68f23f88-b9bc-298c-0df7-cd8f2616a41b/oldWALs/cb28580903fb%2C42117%2C1731457810344.meta.1731457811212.meta 2024-11-13T00:30:40,235 DEBUG [RS:0;cb28580903fb:42117 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/68f23f88-b9bc-298c-0df7-cd8f2616a41b/oldWALs 2024-11-13T00:30:40,235 INFO [RS:0;cb28580903fb:42117 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog cb28580903fb%2C42117%2C1731457810344.meta:.meta(num 1731457836225) 2024-11-13T00:30:40,236 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:30:40,236 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:30:40,236 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:30:40,236 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:30:40,236 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:30:40,238 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42603 is added to blk_1073741844_1028 (size=1162) 2024-11-13T00:30:40,238 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37719 is added to blk_1073741844_1028 (size=1162) 2024-11-13T00:30:40,242 DEBUG [RS:0;cb28580903fb:42117 {}] wal.AbstractFSWAL(1256): Moved 4 WAL file(s) to /user/jenkins/test-data/68f23f88-b9bc-298c-0df7-cd8f2616a41b/oldWALs 2024-11-13T00:30:40,243 INFO [RS:0;cb28580903fb:42117 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog cb28580903fb%2C42117%2C1731457810344:(num 1731457836326) 2024-11-13T00:30:40,243 DEBUG [RS:0;cb28580903fb:42117 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T00:30:40,243 INFO [RS:0;cb28580903fb:42117 {}] regionserver.LeaseManager(133): Closed leases 2024-11-13T00:30:40,243 INFO [RS:0;cb28580903fb:42117 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-13T00:30:40,243 INFO [RS:0;cb28580903fb:42117 {}] hbase.ChoreService(370): Chore service for: regionserver/cb28580903fb:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-13T00:30:40,243 INFO [RS:0;cb28580903fb:42117 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-13T00:30:40,243 INFO 
[regionserver/cb28580903fb:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-13T00:30:40,243 INFO [RS:0;cb28580903fb:42117 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:42117 2024-11-13T00:30:40,246 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46601-0x1010ebb69e80000, quorum=127.0.0.1:58313, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-13T00:30:40,246 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42117-0x1010ebb69e80001, quorum=127.0.0.1:58313, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/cb28580903fb,42117,1731457810344 2024-11-13T00:30:40,246 INFO [RS:0;cb28580903fb:42117 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-13T00:30:40,248 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [cb28580903fb,42117,1731457810344] 2024-11-13T00:30:40,249 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/cb28580903fb,42117,1731457810344 already deleted, retry=false 2024-11-13T00:30:40,249 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; cb28580903fb,42117,1731457810344 expired; onlineServers=0 2024-11-13T00:30:40,249 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'cb28580903fb,46601,1731457810282' ***** 2024-11-13T00:30:40,249 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-13T00:30:40,249 INFO [M:0;cb28580903fb:46601 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-13T00:30:40,249 INFO [M:0;cb28580903fb:46601 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-13T00:30:40,249 DEBUG [M:0;cb28580903fb:46601 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-13T00:30:40,250 DEBUG [M:0;cb28580903fb:46601 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-13T00:30:40,250 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
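The WAL close path above retries HDFS lease recovery roughly once per second: attempts against the already-torn-down cluster on port 34283 keep failing with "Filesystem closed", while the live cluster's meta WAL reports "Recovered lease, attempt=1 ... after 4001ms". The following is a minimal, hedged Java sketch of that retry pattern using only the public DistributedFileSystem calls named in the stack traces (recoverLease, isFileClosed); it is not HBase's RecoverLeaseFSUtils implementation, and the URI, path, and attempt cap are placeholders.

```java
// Hedged sketch of the lease-recovery retry loop suggested by the log above:
// poll DistributedFileSystem.recoverLease()/isFileClosed() about once per second
// until the NameNode reports the WAL closed. Placeholder URI and path; this is
// illustrative only, not the actual RecoverLeaseFSUtils code.
import java.io.IOException;
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class RecoverWalLeaseSketch {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:9000"), new Configuration());
    if (!(fs instanceof DistributedFileSystem)) {
      return; // lease recovery only makes sense on HDFS
    }
    DistributedFileSystem dfs = (DistributedFileSystem) fs;
    Path wal = new Path("/example/WALs/example-wal"); // placeholder WAL path
    long start = System.currentTimeMillis();
    for (int attempt = 0; attempt < 60; attempt++) {
      try {
        // recoverLease() returns true once the lease is released and the file is closed.
        if (dfs.recoverLease(wal) || dfs.isFileClosed(wal)) {
          System.out.println("Recovered lease, attempt=" + attempt + " on file=" + wal
              + " after " + (System.currentTimeMillis() - start) + "ms");
          return;
        }
      } catch (IOException e) {
        // A closed DFSClient surfaces here as "Filesystem closed", matching the
        // repeated WARNs above; log the failure and try again on the next tick.
        System.out.println("Failed invocation for " + wal + ": " + e.getMessage());
      }
      Thread.sleep(1000); // the WARNs above are roughly one second apart
    }
  }
}
```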
2024-11-13T00:30:40,250 DEBUG [master/cb28580903fb:0:becomeActiveMaster-HFileCleaner.large.0-1731457810592 {}] cleaner.HFileCleaner(306): Exit Thread[master/cb28580903fb:0:becomeActiveMaster-HFileCleaner.large.0-1731457810592,5,FailOnTimeoutGroup] 2024-11-13T00:30:40,250 DEBUG [master/cb28580903fb:0:becomeActiveMaster-HFileCleaner.small.0-1731457810593 {}] cleaner.HFileCleaner(306): Exit Thread[master/cb28580903fb:0:becomeActiveMaster-HFileCleaner.small.0-1731457810593,5,FailOnTimeoutGroup] 2024-11-13T00:30:40,250 INFO [M:0;cb28580903fb:46601 {}] hbase.ChoreService(370): Chore service for: master/cb28580903fb:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-13T00:30:40,250 INFO [M:0;cb28580903fb:46601 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-13T00:30:40,250 DEBUG [M:0;cb28580903fb:46601 {}] master.HMaster(1795): Stopping service threads 2024-11-13T00:30:40,250 INFO [M:0;cb28580903fb:46601 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-13T00:30:40,250 INFO [M:0;cb28580903fb:46601 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-13T00:30:40,250 INFO [M:0;cb28580903fb:46601 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-13T00:30:40,251 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-13T00:30:40,251 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46601-0x1010ebb69e80000, quorum=127.0.0.1:58313, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-13T00:30:40,251 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741834_1015: GenerationStamp not matched, existing replica is blk_1073741834_1010 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 
2024-11-13T00:30:40,251 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46601-0x1010ebb69e80000, quorum=127.0.0.1:58313, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T00:30:40,251 DEBUG [M:0;cb28580903fb:46601 {}] zookeeper.ZKUtil(347): master:46601-0x1010ebb69e80000, quorum=127.0.0.1:58313, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-13T00:30:40,251 WARN [M:0;cb28580903fb:46601 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-13T00:30:40,252 INFO [M:0;cb28580903fb:46601 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:43143/user/jenkins/test-data/68f23f88-b9bc-298c-0df7-cd8f2616a41b/.lastflushedseqids 2024-11-13T00:30:40,257 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42603 is added to blk_1073741846_1030 (size=120) 2024-11-13T00:30:40,257 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37719 is added to blk_1073741846_1030 (size=120) 2024-11-13T00:30:40,257 INFO [M:0;cb28580903fb:46601 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-13T00:30:40,257 INFO [M:0;cb28580903fb:46601 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-13T00:30:40,257 DEBUG [M:0;cb28580903fb:46601 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-13T00:30:40,258 INFO [M:0;cb28580903fb:46601 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-13T00:30:40,258 DEBUG [M:0;cb28580903fb:46601 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-13T00:30:40,258 DEBUG [M:0;cb28580903fb:46601 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-13T00:30:40,258 DEBUG [M:0;cb28580903fb:46601 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-13T00:30:40,258 INFO [M:0;cb28580903fb:46601 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.18 KB heapSize=29.16 KB 2024-11-13T00:30:40,258 ERROR [FSHLog-0-hdfs://localhost:43143/user/jenkins/test-data/68f23f88-b9bc-298c-0df7-cd8f2616a41b/MasterData-prefix:cb28580903fb,46601,1731457810282 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36659,DS-90d4e24e-d0f9-4947-abd1-b96dac04a6f2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T00:30:40,258 WARN [FSHLog-0-hdfs://localhost:43143/user/jenkins/test-data/68f23f88-b9bc-298c-0df7-cd8f2616a41b/MasterData-prefix:cb28580903fb,46601,1731457810282 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36659,DS-90d4e24e-d0f9-4947-abd1-b96dac04a6f2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T00:30:40,258 DEBUG [master:store-WAL-Roller {}] wal.AbstractWALRoller(198): WAL FSHLog cb28580903fb%2C46601%2C1731457810282:(num 1731457810495) roll requested 2024-11-13T00:30:40,259 INFO [master:store-WAL-Roller {}] monitor.StreamSlowMonitor(122): New stream slow monitor cb28580903fb%2C46601%2C1731457810282.1731457840258 2024-11-13T00:30:40,262 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-13T00:30:40,263 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:30:40,263 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:30:40,263 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:30:40,263 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:30:40,263 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:30:40,263 INFO [master:store-WAL-Roller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/68f23f88-b9bc-298c-0df7-cd8f2616a41b/MasterData/WALs/cb28580903fb,46601,1731457810282/cb28580903fb%2C46601%2C1731457810282.1731457810495 with entries=53, filesize=26.63 KB; new WAL /user/jenkins/test-data/68f23f88-b9bc-298c-0df7-cd8f2616a41b/MasterData/WALs/cb28580903fb,46601,1731457810282/cb28580903fb%2C46601%2C1731457810282.1731457840258 2024-11-13T00:30:40,264 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36659,DS-90d4e24e-d0f9-4947-abd1-b96dac04a6f2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T00:30:40,264 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. 
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:36659,DS-90d4e24e-d0f9-4947-abd1-b96dac04a6f2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-13T00:30:40,264 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:43143/user/jenkins/test-data/68f23f88-b9bc-298c-0df7-cd8f2616a41b/MasterData/WALs/cb28580903fb,46601,1731457810282/cb28580903fb%2C46601%2C1731457810282.1731457810495 2024-11-13T00:30:40,264 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41907:41907),(127.0.0.1/127.0.0.1:36703:36703)] 2024-11-13T00:30:40,264 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(879): hdfs://localhost:43143/user/jenkins/test-data/68f23f88-b9bc-298c-0df7-cd8f2616a41b/MasterData/WALs/cb28580903fb,46601,1731457810282/cb28580903fb%2C46601%2C1731457810282.1731457810495 is not closed yet, will try archiving it next time 2024-11-13T00:30:40,264 WARN [IPC Server handler 2 on default port 43143 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/68f23f88-b9bc-298c-0df7-cd8f2616a41b/MasterData/WALs/cb28580903fb,46601,1731457810282/cb28580903fb%2C46601%2C1731457810282.1731457810495 has not been closed. Lease recovery is in progress. 
RecoveryId = 1032 for block blk_1073741830_1013 2024-11-13T00:30:40,264 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:43143/user/jenkins/test-data/68f23f88-b9bc-298c-0df7-cd8f2616a41b/MasterData/WALs/cb28580903fb,46601,1731457810282/cb28580903fb%2C46601%2C1731457810282.1731457810495 after 0ms 2024-11-13T00:30:40,279 DEBUG [M:0;cb28580903fb:46601 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43143/user/jenkins/test-data/68f23f88-b9bc-298c-0df7-cd8f2616a41b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/d6fecdc58b74443ba359b8a01d288b79 is 82, key is hbase:meta,,1/info:regioninfo/1731457811310/Put/seqid=0 2024-11-13T00:30:40,283 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37719 is added to blk_1073741848_1033 (size=5672) 2024-11-13T00:30:40,283 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42603 is added to blk_1073741848_1033 (size=5672) 2024-11-13T00:30:40,283 INFO [M:0;cb28580903fb:46601 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:43143/user/jenkins/test-data/68f23f88-b9bc-298c-0df7-cd8f2616a41b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/d6fecdc58b74443ba359b8a01d288b79 2024-11-13T00:30:40,303 DEBUG [M:0;cb28580903fb:46601 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43143/user/jenkins/test-data/68f23f88-b9bc-298c-0df7-cd8f2616a41b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/da6ac6508f3242dca2f05251ee08e642 is 779, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731457811780/Put/seqid=0 2024-11-13T00:30:40,309 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42603 is added to blk_1073741849_1034 (size=6119) 2024-11-13T00:30:40,309 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37719 is added to blk_1073741849_1034 (size=6119) 2024-11-13T00:30:40,309 INFO [M:0;cb28580903fb:46601 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.58 KB at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:43143/user/jenkins/test-data/68f23f88-b9bc-298c-0df7-cd8f2616a41b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/da6ac6508f3242dca2f05251ee08e642 2024-11-13T00:30:40,328 DEBUG [M:0;cb28580903fb:46601 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43143/user/jenkins/test-data/68f23f88-b9bc-298c-0df7-cd8f2616a41b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/35bf7334b5db44379f096d575ef77007 is 69, key is cb28580903fb,42117,1731457810344/rs:state/1731457810707/Put/seqid=0 2024-11-13T00:30:40,333 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42603 is added to blk_1073741850_1035 (size=5156) 2024-11-13T00:30:40,333 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37719 is added to blk_1073741850_1035 (size=5156) 2024-11-13T00:30:40,334 INFO [M:0;cb28580903fb:46601 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=56 (bloomFilter=true), 
to=hdfs://localhost:43143/user/jenkins/test-data/68f23f88-b9bc-298c-0df7-cd8f2616a41b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/35bf7334b5db44379f096d575ef77007 2024-11-13T00:30:40,348 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42117-0x1010ebb69e80001, quorum=127.0.0.1:58313, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-13T00:30:40,348 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42117-0x1010ebb69e80001, quorum=127.0.0.1:58313, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-13T00:30:40,348 INFO [RS:0;cb28580903fb:42117 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-13T00:30:40,348 INFO [RS:0;cb28580903fb:42117 {}] regionserver.HRegionServer(1031): Exiting; stopping=cb28580903fb,42117,1731457810344; zookeeper connection closed. 2024-11-13T00:30:40,348 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@41168c86 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@41168c86 2024-11-13T00:30:40,349 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-13T00:30:40,352 DEBUG [M:0;cb28580903fb:46601 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43143/user/jenkins/test-data/68f23f88-b9bc-298c-0df7-cd8f2616a41b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/8ac7fae96f1b45d08450150a7f68fd03 is 52, key is load_balancer_on/state:d/1731457811391/Put/seqid=0 2024-11-13T00:30:40,357 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42603 is added to blk_1073741851_1036 (size=5056) 2024-11-13T00:30:40,357 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37719 is added to blk_1073741851_1036 (size=5056) 2024-11-13T00:30:40,358 INFO [M:0;cb28580903fb:46601 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:43143/user/jenkins/test-data/68f23f88-b9bc-298c-0df7-cd8f2616a41b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/8ac7fae96f1b45d08450150a7f68fd03 2024-11-13T00:30:40,363 DEBUG [M:0;cb28580903fb:46601 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43143/user/jenkins/test-data/68f23f88-b9bc-298c-0df7-cd8f2616a41b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/d6fecdc58b74443ba359b8a01d288b79 as hdfs://localhost:43143/user/jenkins/test-data/68f23f88-b9bc-298c-0df7-cd8f2616a41b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/d6fecdc58b74443ba359b8a01d288b79 2024-11-13T00:30:40,368 INFO [M:0;cb28580903fb:46601 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43143/user/jenkins/test-data/68f23f88-b9bc-298c-0df7-cd8f2616a41b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/d6fecdc58b74443ba359b8a01d288b79, entries=8, sequenceid=56, filesize=5.5 K 2024-11-13T00:30:40,369 DEBUG [M:0;cb28580903fb:46601 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43143/user/jenkins/test-data/68f23f88-b9bc-298c-0df7-cd8f2616a41b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/da6ac6508f3242dca2f05251ee08e642 as 
hdfs://localhost:43143/user/jenkins/test-data/68f23f88-b9bc-298c-0df7-cd8f2616a41b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/da6ac6508f3242dca2f05251ee08e642 2024-11-13T00:30:40,374 INFO [M:0;cb28580903fb:46601 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43143/user/jenkins/test-data/68f23f88-b9bc-298c-0df7-cd8f2616a41b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/da6ac6508f3242dca2f05251ee08e642, entries=6, sequenceid=56, filesize=6.0 K 2024-11-13T00:30:40,375 DEBUG [M:0;cb28580903fb:46601 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43143/user/jenkins/test-data/68f23f88-b9bc-298c-0df7-cd8f2616a41b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/35bf7334b5db44379f096d575ef77007 as hdfs://localhost:43143/user/jenkins/test-data/68f23f88-b9bc-298c-0df7-cd8f2616a41b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/35bf7334b5db44379f096d575ef77007 2024-11-13T00:30:40,379 INFO [M:0;cb28580903fb:46601 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43143/user/jenkins/test-data/68f23f88-b9bc-298c-0df7-cd8f2616a41b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/35bf7334b5db44379f096d575ef77007, entries=1, sequenceid=56, filesize=5.0 K 2024-11-13T00:30:40,380 DEBUG [M:0;cb28580903fb:46601 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43143/user/jenkins/test-data/68f23f88-b9bc-298c-0df7-cd8f2616a41b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/8ac7fae96f1b45d08450150a7f68fd03 as hdfs://localhost:43143/user/jenkins/test-data/68f23f88-b9bc-298c-0df7-cd8f2616a41b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/8ac7fae96f1b45d08450150a7f68fd03 2024-11-13T00:30:40,385 INFO [M:0;cb28580903fb:46601 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43143/user/jenkins/test-data/68f23f88-b9bc-298c-0df7-cd8f2616a41b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/8ac7fae96f1b45d08450150a7f68fd03, entries=1, sequenceid=56, filesize=4.9 K 2024-11-13T00:30:40,386 INFO [M:0;cb28580903fb:46601 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.18 KB/23738, heapSize ~29.10 KB/29800, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 128ms, sequenceid=56, compaction requested=false 2024-11-13T00:30:40,387 INFO [M:0;cb28580903fb:46601 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-13T00:30:40,387 DEBUG [M:0;cb28580903fb:46601 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731457840257Disabling compacts and flushes for region at 1731457840257Disabling writes for close at 1731457840258 (+1 ms)Obtaining lock to block concurrent updates at 1731457840258Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731457840258Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23738, getHeapSize=29800, getOffHeapSize=0, getCellsCount=67 at 1731457840258Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1731457840264 (+6 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731457840265 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731457840278 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731457840278Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731457840289 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731457840302 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731457840302Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731457840314 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731457840328 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731457840328Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731457840338 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731457840352 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731457840352Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5950eb85: reopening flushed file at 1731457840363 (+11 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@448430e7: reopening flushed file at 1731457840368 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1d186639: reopening flushed file at 1731457840374 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1cc8ba5a: reopening flushed file at 1731457840379 (+5 ms)Finished flush of dataSize ~23.18 KB/23738, heapSize ~29.10 KB/29800, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 128ms, sequenceid=56, compaction requested=false at 1731457840386 (+7 ms)Writing region close event to WAL at 1731457840387 (+1 ms)Closed at 1731457840387 2024-11-13T00:30:40,388 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:30:40,388 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:30:40,388 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:30:40,388 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:30:40,388 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:30:40,390 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42603 is added to blk_1073741847_1031 (size=757) 2024-11-13T00:30:40,391 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37719 is added to blk_1073741847_1031 (size=757) 2024-11-13T00:30:40,687 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/MasterData/WALs/cb28580903fb,32917,1731457759225/cb28580903fb%2C32917%2C1731457759225.1731457759520 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:30:40,951 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,35015,1731457760593/cb28580903fb%2C35015%2C1731457760593.1731457760827 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:30:40,952 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,41233,1731457759323/cb28580903fb%2C41233%2C1731457759323.meta.1731457760430.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:30:41,385 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T00:30:41,385 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T00:30:41,397 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T00:30:41,397 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T00:30:41,397 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T00:30:41,397 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T00:30:41,397 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T00:30:41,397 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T00:30:41,400 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T00:30:41,400 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T00:30:41,400 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T00:30:41,402 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T00:30:41,405 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T00:30:41,405 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T00:30:41,688 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/MasterData/WALs/cb28580903fb,32917,1731457759225/cb28580903fb%2C32917%2C1731457759225.1731457759520 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:30:41,907 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-13T00:30:41,908 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T00:30:41,909 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T00:30:41,909 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T00:30:41,909 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T00:30:41,923 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T00:30:41,923 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T00:30:41,923 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T00:30:41,923 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T00:30:41,923 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T00:30:41,924 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T00:30:41,926 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T00:30:41,926 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T00:30:41,927 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T00:30:41,929 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T00:30:41,952 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,35015,1731457760593/cb28580903fb%2C35015%2C1731457760593.1731457760827 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:30:41,952 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,41233,1731457759323/cb28580903fb%2C41233%2C1731457759323.meta.1731457760430.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:30:42,688 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/MasterData/WALs/cb28580903fb,32917,1731457759225/cb28580903fb%2C32917%2C1731457759225.1731457759520 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:30:42,952 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,35015,1731457760593/cb28580903fb%2C35015%2C1731457760593.1731457760827 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:30:42,953 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,41233,1731457759323/cb28580903fb%2C41233%2C1731457759323.meta.1731457760430.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:30:43,252 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741830_1013: GenerationStamp not matched, existing replica is blk_1073741830_1006 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 
2024-11-13T00:30:43,689 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/MasterData/WALs/cb28580903fb,32917,1731457759225/cb28580903fb%2C32917%2C1731457759225.1731457759520 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:30:43,953 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,35015,1731457760593/cb28580903fb%2C35015%2C1731457760593.1731457760827 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:30:43,953 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,41233,1731457759323/cb28580903fb%2C41233%2C1731457759323.meta.1731457760430.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-13T00:30:44,265 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:43143/user/jenkins/test-data/68f23f88-b9bc-298c-0df7-cd8f2616a41b/MasterData/WALs/cb28580903fb,46601,1731457810282/cb28580903fb%2C46601%2C1731457810282.1731457810495 after 4001ms 2024-11-13T00:30:44,265 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:43143/user/jenkins/test-data/68f23f88-b9bc-298c-0df7-cd8f2616a41b/MasterData/WALs/cb28580903fb,46601,1731457810282/cb28580903fb%2C46601%2C1731457810282.1731457810495 to hdfs://localhost:43143/user/jenkins/test-data/68f23f88-b9bc-298c-0df7-cd8f2616a41b/MasterData/oldWALs/cb28580903fb%2C46601%2C1731457810282.1731457810495 2024-11-13T00:30:44,268 INFO [WAL-Archive-0 {}] region.MasterRegionUtils(50): Moved hdfs://localhost:43143/user/jenkins/test-data/68f23f88-b9bc-298c-0df7-cd8f2616a41b/MasterData/oldWALs/cb28580903fb%2C46601%2C1731457810282.1731457810495 to hdfs://localhost:43143/user/jenkins/test-data/68f23f88-b9bc-298c-0df7-cd8f2616a41b/oldWALs/cb28580903fb%2C46601%2C1731457810282.1731457810495$masterlocalwal$ 2024-11-13T00:30:44,269 INFO [M:0;cb28580903fb:46601 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-13T00:30:44,269 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-13T00:30:44,269 INFO [M:0;cb28580903fb:46601 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:46601 2024-11-13T00:30:44,269 INFO [M:0;cb28580903fb:46601 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-13T00:30:44,373 INFO [M:0;cb28580903fb:46601 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-13T00:30:44,373 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46601-0x1010ebb69e80000, quorum=127.0.0.1:58313, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-13T00:30:44,373 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46601-0x1010ebb69e80000, quorum=127.0.0.1:58313, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-13T00:30:44,375 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@6a3feff9{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-13T00:30:44,375 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@708b22ef{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-13T00:30:44,375 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-13T00:30:44,376 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4dfe1abd{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-13T00:30:44,376 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@10a15b18{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/82264a5e-14d0-268b-aec5-074583ae0e1c/hadoop.log.dir/,STOPPED} 2024-11-13T00:30:44,377 WARN [BP-472515239-172.17.0.2-1731457809380 heartbeating to localhost/127.0.0.1:43143 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 
2024-11-13T00:30:44,377 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-13T00:30:44,377 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-13T00:30:44,377 WARN [BP-472515239-172.17.0.2-1731457809380 heartbeating to localhost/127.0.0.1:43143 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-472515239-172.17.0.2-1731457809380 (Datanode Uuid 98a15cd0-243c-422e-9014-af5f2d458c5f) service to localhost/127.0.0.1:43143 2024-11-13T00:30:44,377 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/82264a5e-14d0-268b-aec5-074583ae0e1c/cluster_53488b2f-acaa-1518-ad8c-418752e5a340/data/data3/current/BP-472515239-172.17.0.2-1731457809380 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-13T00:30:44,378 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/82264a5e-14d0-268b-aec5-074583ae0e1c/cluster_53488b2f-acaa-1518-ad8c-418752e5a340/data/data4/current/BP-472515239-172.17.0.2-1731457809380 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-13T00:30:44,378 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-13T00:30:44,380 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@53bd4263{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-13T00:30:44,380 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@60ff2da2{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-13T00:30:44,380 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-13T00:30:44,380 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4e5c1ba8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-13T00:30:44,380 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4e617473{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/82264a5e-14d0-268b-aec5-074583ae0e1c/hadoop.log.dir/,STOPPED} 2024-11-13T00:30:44,381 WARN [BP-472515239-172.17.0.2-1731457809380 heartbeating to localhost/127.0.0.1:43143 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-13T00:30:44,381 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-13T00:30:44,381 WARN [BP-472515239-172.17.0.2-1731457809380 heartbeating to localhost/127.0.0.1:43143 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-472515239-172.17.0.2-1731457809380 (Datanode Uuid 3eab685d-3b03-4ec1-ba04-2c0b82a3601d) service to localhost/127.0.0.1:43143 2024-11-13T00:30:44,381 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-13T00:30:44,382 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/82264a5e-14d0-268b-aec5-074583ae0e1c/cluster_53488b2f-acaa-1518-ad8c-418752e5a340/data/data1/current/BP-472515239-172.17.0.2-1731457809380 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-13T00:30:44,382 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/82264a5e-14d0-268b-aec5-074583ae0e1c/cluster_53488b2f-acaa-1518-ad8c-418752e5a340/data/data2/current/BP-472515239-172.17.0.2-1731457809380 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-13T00:30:44,382 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-13T00:30:44,387 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@10036843{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-13T00:30:44,388 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@7a2900d5{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-13T00:30:44,388 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-13T00:30:44,388 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@278324d2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-13T00:30:44,388 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@b091339{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/82264a5e-14d0-268b-aec5-074583ae0e1c/hadoop.log.dir/,STOPPED} 2024-11-13T00:30:44,394 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-13T00:30:44,410 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-13T00:30:44,418 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnPipelineRestart Thread=182 (was 157) Potentially hanging thread: nioEventLoopGroup-31-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-30-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-10-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-30-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-31-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-15 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (599498808) connection to localhost/127.0.0.1:43143 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-14 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (599498808) connection to 
localhost/127.0.0.1:43143 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:43143 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.4@localhost:43143 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-31-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-10-3 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:43143 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-33-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-33-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-12 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-13 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (599498808) connection to localhost/127.0.0.1:43143 from jenkins.hfs.4 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-33-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:43143 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-10-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-30-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:43143 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=457 (was 440) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=198 (was 211), ProcessCount=11 (was 11), AvailableMemoryMB=1292 (was 1727) 2024-11-13T00:30:44,426 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testCompactionRecordDoesntBlockRolling Thread=182, OpenFileDescriptor=457, MaxFileDescriptor=1048576, SystemLoadAverage=198, ProcessCount=11, AvailableMemoryMB=1292 2024-11-13T00:30:44,426 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-13T00:30:44,426 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/82264a5e-14d0-268b-aec5-074583ae0e1c/hadoop.log.dir so I do NOT create it in target/test-data/f7ff0268-c833-3a7c-dc93-16dfa0701c6a 2024-11-13T00:30:44,426 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/82264a5e-14d0-268b-aec5-074583ae0e1c/hadoop.tmp.dir so I do NOT create it in target/test-data/f7ff0268-c833-3a7c-dc93-16dfa0701c6a 2024-11-13T00:30:44,426 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f7ff0268-c833-3a7c-dc93-16dfa0701c6a/cluster_cedab1ee-1ff8-0269-fdde-fcd88c748e68, deleteOnExit=true 2024-11-13T00:30:44,426 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-13T00:30:44,427 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f7ff0268-c833-3a7c-dc93-16dfa0701c6a/test.cache.data in system properties and HBase conf 2024-11-13T00:30:44,427 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f7ff0268-c833-3a7c-dc93-16dfa0701c6a/hadoop.tmp.dir in system properties and HBase conf 2024-11-13T00:30:44,427 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f7ff0268-c833-3a7c-dc93-16dfa0701c6a/hadoop.log.dir in system properties and HBase conf 2024-11-13T00:30:44,427 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f7ff0268-c833-3a7c-dc93-16dfa0701c6a/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-13T00:30:44,427 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f7ff0268-c833-3a7c-dc93-16dfa0701c6a/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-13T00:30:44,427 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-13T00:30:44,427 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file 
system is not a DistributedFileSystem. Skipping on block location reordering 2024-11-13T00:30:44,427 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f7ff0268-c833-3a7c-dc93-16dfa0701c6a/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-13T00:30:44,427 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f7ff0268-c833-3a7c-dc93-16dfa0701c6a/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-13T00:30:44,427 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f7ff0268-c833-3a7c-dc93-16dfa0701c6a/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-13T00:30:44,427 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f7ff0268-c833-3a7c-dc93-16dfa0701c6a/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-13T00:30:44,428 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f7ff0268-c833-3a7c-dc93-16dfa0701c6a/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-13T00:30:44,428 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f7ff0268-c833-3a7c-dc93-16dfa0701c6a/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-13T00:30:44,428 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f7ff0268-c833-3a7c-dc93-16dfa0701c6a/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-13T00:30:44,428 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f7ff0268-c833-3a7c-dc93-16dfa0701c6a/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-13T00:30:44,428 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f7ff0268-c833-3a7c-dc93-16dfa0701c6a/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-13T00:30:44,428 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f7ff0268-c833-3a7c-dc93-16dfa0701c6a/nfs.dump.dir in system properties and HBase conf 2024-11-13T00:30:44,428 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f7ff0268-c833-3a7c-dc93-16dfa0701c6a/java.io.tmpdir in system properties and HBase conf 2024-11-13T00:30:44,428 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f7ff0268-c833-3a7c-dc93-16dfa0701c6a/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-13T00:30:44,428 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f7ff0268-c833-3a7c-dc93-16dfa0701c6a/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-13T00:30:44,428 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f7ff0268-c833-3a7c-dc93-16dfa0701c6a/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-13T00:30:44,441 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-13T00:30:44,511 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-13T00:30:44,514 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-13T00:30:44,515 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-13T00:30:44,516 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-13T00:30:44,516 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-13T00:30:44,516 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-13T00:30:44,516 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6d795589{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f7ff0268-c833-3a7c-dc93-16dfa0701c6a/hadoop.log.dir/,AVAILABLE} 2024-11-13T00:30:44,517 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@391cb626{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-13T00:30:44,632 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@683927ba{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f7ff0268-c833-3a7c-dc93-16dfa0701c6a/java.io.tmpdir/jetty-localhost-42825-hadoop-hdfs-3_4_1-tests_jar-_-any-16450201052314951908/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-13T00:30:44,632 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@46ff36eb{HTTP/1.1, (http/1.1)}{localhost:42825} 2024-11-13T00:30:44,632 INFO [Time-limited test {}] server.Server(415): Started @190480ms 2024-11-13T00:30:44,645 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-13T00:30:44,689 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/MasterData/WALs/cb28580903fb,32917,1731457759225/cb28580903fb%2C32917%2C1731457759225.1731457759520 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:30:44,695 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-13T00:30:44,698 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-13T00:30:44,698 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-13T00:30:44,699 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-13T00:30:44,699 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-13T00:30:44,699 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@18d07610{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f7ff0268-c833-3a7c-dc93-16dfa0701c6a/hadoop.log.dir/,AVAILABLE} 2024-11-13T00:30:44,699 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6778d1f2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-13T00:30:44,812 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2fe1acb9{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f7ff0268-c833-3a7c-dc93-16dfa0701c6a/java.io.tmpdir/jetty-localhost-39293-hadoop-hdfs-3_4_1-tests_jar-_-any-15709534216112825252/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-13T00:30:44,813 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1781cd99{HTTP/1.1, (http/1.1)}{localhost:39293} 2024-11-13T00:30:44,813 INFO [Time-limited test {}] server.Server(415): Started @190661ms 2024-11-13T00:30:44,814 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-13T00:30:44,851 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-13T00:30:44,855 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-13T00:30:44,856 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-13T00:30:44,856 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-13T00:30:44,856 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-13T00:30:44,857 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7ee92539{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f7ff0268-c833-3a7c-dc93-16dfa0701c6a/hadoop.log.dir/,AVAILABLE} 2024-11-13T00:30:44,857 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6f2fd5ff{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-13T00:30:44,931 WARN [Thread-1656 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f7ff0268-c833-3a7c-dc93-16dfa0701c6a/cluster_cedab1ee-1ff8-0269-fdde-fcd88c748e68/data/data1/current/BP-1184649482-172.17.0.2-1731457844458/current, will proceed with Du for space computation calculation, 2024-11-13T00:30:44,931 WARN [Thread-1657 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f7ff0268-c833-3a7c-dc93-16dfa0701c6a/cluster_cedab1ee-1ff8-0269-fdde-fcd88c748e68/data/data2/current/BP-1184649482-172.17.0.2-1731457844458/current, will proceed with Du for space computation calculation, 2024-11-13T00:30:44,948 WARN [Thread-1635 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-13T00:30:44,950 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x85e099e2cbbd603a with lease ID 0x9578aab3e4000710: Processing first storage report for DS-3f6f6e81-43ab-4f5b-a6cf-70b543f4df40 from datanode DatanodeRegistration(127.0.0.1:46085, datanodeUuid=d195dff4-3752-4c6a-8f06-df5619606dc9, infoPort=41921, infoSecurePort=0, ipcPort=37589, storageInfo=lv=-57;cid=testClusterID;nsid=1690816239;c=1731457844458) 2024-11-13T00:30:44,951 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x85e099e2cbbd603a with lease ID 0x9578aab3e4000710: from storage DS-3f6f6e81-43ab-4f5b-a6cf-70b543f4df40 node DatanodeRegistration(127.0.0.1:46085, datanodeUuid=d195dff4-3752-4c6a-8f06-df5619606dc9, infoPort=41921, infoSecurePort=0, ipcPort=37589, storageInfo=lv=-57;cid=testClusterID;nsid=1690816239;c=1731457844458), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-13T00:30:44,951 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x85e099e2cbbd603a with lease ID 0x9578aab3e4000710: Processing first storage report for DS-22b0c83c-bd6e-4e59-83e7-553da95b56c2 from datanode DatanodeRegistration(127.0.0.1:46085, datanodeUuid=d195dff4-3752-4c6a-8f06-df5619606dc9, infoPort=41921, infoSecurePort=0, ipcPort=37589, storageInfo=lv=-57;cid=testClusterID;nsid=1690816239;c=1731457844458) 2024-11-13T00:30:44,951 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x85e099e2cbbd603a with lease ID 0x9578aab3e4000710: from storage DS-22b0c83c-bd6e-4e59-83e7-553da95b56c2 node DatanodeRegistration(127.0.0.1:46085, datanodeUuid=d195dff4-3752-4c6a-8f06-df5619606dc9, infoPort=41921, infoSecurePort=0, ipcPort=37589, storageInfo=lv=-57;cid=testClusterID;nsid=1690816239;c=1731457844458), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-13T00:30:44,953 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,35015,1731457760593/cb28580903fb%2C35015%2C1731457760593.1731457760827 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:30:44,954 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,41233,1731457759323/cb28580903fb%2C41233%2C1731457759323.meta.1731457760430.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-13T00:30:44,986 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@66db372d{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f7ff0268-c833-3a7c-dc93-16dfa0701c6a/java.io.tmpdir/jetty-localhost-46849-hadoop-hdfs-3_4_1-tests_jar-_-any-4076757328788862418/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-13T00:30:44,987 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@7f06ed2{HTTP/1.1, (http/1.1)}{localhost:46849} 2024-11-13T00:30:44,987 INFO [Time-limited test {}] server.Server(415): Started @190835ms 2024-11-13T00:30:44,988 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-13T00:30:45,094 WARN [Thread-1682 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f7ff0268-c833-3a7c-dc93-16dfa0701c6a/cluster_cedab1ee-1ff8-0269-fdde-fcd88c748e68/data/data3/current/BP-1184649482-172.17.0.2-1731457844458/current, will proceed with Du for space computation calculation, 2024-11-13T00:30:45,094 WARN [Thread-1683 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f7ff0268-c833-3a7c-dc93-16dfa0701c6a/cluster_cedab1ee-1ff8-0269-fdde-fcd88c748e68/data/data4/current/BP-1184649482-172.17.0.2-1731457844458/current, will proceed with Du for space computation calculation, 2024-11-13T00:30:45,110 WARN [Thread-1671 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-13T00:30:45,112 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xdb71526ad0256e0a with lease ID 0x9578aab3e4000711: Processing first storage report for DS-ddf17b65-5d3a-4a05-9bd6-3234742bf621 from datanode DatanodeRegistration(127.0.0.1:38553, datanodeUuid=7766541c-f1e7-4073-9ed8-7d3188f3da13, infoPort=41235, infoSecurePort=0, ipcPort=33457, storageInfo=lv=-57;cid=testClusterID;nsid=1690816239;c=1731457844458) 2024-11-13T00:30:45,112 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xdb71526ad0256e0a with lease ID 0x9578aab3e4000711: from storage DS-ddf17b65-5d3a-4a05-9bd6-3234742bf621 node DatanodeRegistration(127.0.0.1:38553, datanodeUuid=7766541c-f1e7-4073-9ed8-7d3188f3da13, infoPort=41235, infoSecurePort=0, ipcPort=33457, storageInfo=lv=-57;cid=testClusterID;nsid=1690816239;c=1731457844458), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-13T00:30:45,112 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xdb71526ad0256e0a with lease ID 0x9578aab3e4000711: Processing first storage report for DS-fc3948a4-74df-4ff4-ad6b-38b359bcfb8a from datanode DatanodeRegistration(127.0.0.1:38553, datanodeUuid=7766541c-f1e7-4073-9ed8-7d3188f3da13, infoPort=41235, infoSecurePort=0, ipcPort=33457, storageInfo=lv=-57;cid=testClusterID;nsid=1690816239;c=1731457844458) 2024-11-13T00:30:45,113 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xdb71526ad0256e0a with lease ID 0x9578aab3e4000711: from storage DS-fc3948a4-74df-4ff4-ad6b-38b359bcfb8a node DatanodeRegistration(127.0.0.1:38553, datanodeUuid=7766541c-f1e7-4073-9ed8-7d3188f3da13, infoPort=41235, infoSecurePort=0, ipcPort=33457, storageInfo=lv=-57;cid=testClusterID;nsid=1690816239;c=1731457844458), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-13T00:30:45,211 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f7ff0268-c833-3a7c-dc93-16dfa0701c6a 2024-11-13T00:30:45,213 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f7ff0268-c833-3a7c-dc93-16dfa0701c6a/cluster_cedab1ee-1ff8-0269-fdde-fcd88c748e68/zookeeper_0, clientPort=58448, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f7ff0268-c833-3a7c-dc93-16dfa0701c6a/cluster_cedab1ee-1ff8-0269-fdde-fcd88c748e68/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f7ff0268-c833-3a7c-dc93-16dfa0701c6a/cluster_cedab1ee-1ff8-0269-fdde-fcd88c748e68/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-13T00:30:45,214 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=58448 2024-11-13T00:30:45,215 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T00:30:45,216 
INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T00:30:45,223 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46085 is added to blk_1073741825_1001 (size=7) 2024-11-13T00:30:45,224 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38553 is added to blk_1073741825_1001 (size=7) 2024-11-13T00:30:45,225 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:39533/user/jenkins/test-data/f0421069-58d7-d4ce-1e56-b679fee0e922 with version=8 2024-11-13T00:30:45,225 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:36657/user/jenkins/test-data/90fad67b-44a6-07b6-9c5c-bf568afb6a9a/hbase-staging 2024-11-13T00:30:45,227 INFO [Time-limited test {}] client.ConnectionUtils(128): master/cb28580903fb:0 server-side Connection retries=45 2024-11-13T00:30:45,227 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-13T00:30:45,227 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-13T00:30:45,227 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-13T00:30:45,227 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-13T00:30:45,227 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-13T00:30:45,227 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-13T00:30:45,227 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-13T00:30:45,228 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:33675 2024-11-13T00:30:45,229 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:33675 connecting to ZooKeeper ensemble=127.0.0.1:58448 2024-11-13T00:30:45,236 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:336750x0, quorum=127.0.0.1:58448, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-13T00:30:45,236 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:33675-0x1010ebbf26d0000 connected 2024-11-13T00:30:45,250 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T00:30:45,251 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting 
call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T00:30:45,253 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:33675-0x1010ebbf26d0000, quorum=127.0.0.1:58448, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-13T00:30:45,253 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:39533/user/jenkins/test-data/f0421069-58d7-d4ce-1e56-b679fee0e922, hbase.cluster.distributed=false 2024-11-13T00:30:45,254 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:33675-0x1010ebbf26d0000, quorum=127.0.0.1:58448, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-13T00:30:45,255 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=33675 2024-11-13T00:30:45,255 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=33675 2024-11-13T00:30:45,255 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=33675 2024-11-13T00:30:45,255 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=33675 2024-11-13T00:30:45,255 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=33675 2024-11-13T00:30:45,270 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/cb28580903fb:0 server-side Connection retries=45 2024-11-13T00:30:45,270 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-13T00:30:45,270 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-13T00:30:45,270 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-13T00:30:45,270 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-13T00:30:45,270 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-13T00:30:45,270 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-13T00:30:45,271 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-13T00:30:45,271 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:35993 2024-11-13T00:30:45,273 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:35993 connecting to ZooKeeper ensemble=127.0.0.1:58448 2024-11-13T00:30:45,274 INFO [Time-limited test {}] 
fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T00:30:45,276 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T00:30:45,281 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:359930x0, quorum=127.0.0.1:58448, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-13T00:30:45,281 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:359930x0, quorum=127.0.0.1:58448, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-13T00:30:45,281 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:35993-0x1010ebbf26d0001 connected 2024-11-13T00:30:45,281 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-13T00:30:45,282 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-13T00:30:45,282 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:35993-0x1010ebbf26d0001, quorum=127.0.0.1:58448, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-13T00:30:45,283 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:35993-0x1010ebbf26d0001, quorum=127.0.0.1:58448, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-13T00:30:45,284 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=35993 2024-11-13T00:30:45,284 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=35993 2024-11-13T00:30:45,284 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=35993 2024-11-13T00:30:45,284 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=35993 2024-11-13T00:30:45,284 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=35993 2024-11-13T00:30:45,295 DEBUG [M:0;cb28580903fb:33675 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;cb28580903fb:33675 2024-11-13T00:30:45,296 INFO [master/cb28580903fb:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/cb28580903fb,33675,1731457845226 2024-11-13T00:30:45,297 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35993-0x1010ebbf26d0001, quorum=127.0.0.1:58448, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-13T00:30:45,297 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33675-0x1010ebbf26d0000, quorum=127.0.0.1:58448, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-13T00:30:45,298 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:33675-0x1010ebbf26d0000, quorum=127.0.0.1:58448, baseZNode=/hbase Set 
watcher on existing znode=/hbase/backup-masters/cb28580903fb,33675,1731457845226 2024-11-13T00:30:45,299 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33675-0x1010ebbf26d0000, quorum=127.0.0.1:58448, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T00:30:45,299 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35993-0x1010ebbf26d0001, quorum=127.0.0.1:58448, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-13T00:30:45,299 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35993-0x1010ebbf26d0001, quorum=127.0.0.1:58448, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T00:30:45,299 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:33675-0x1010ebbf26d0000, quorum=127.0.0.1:58448, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-13T00:30:45,300 INFO [master/cb28580903fb:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/cb28580903fb,33675,1731457845226 from backup master directory 2024-11-13T00:30:45,301 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35993-0x1010ebbf26d0001, quorum=127.0.0.1:58448, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-13T00:30:45,301 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33675-0x1010ebbf26d0000, quorum=127.0.0.1:58448, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/cb28580903fb,33675,1731457845226 2024-11-13T00:30:45,301 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33675-0x1010ebbf26d0000, quorum=127.0.0.1:58448, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-13T00:30:45,301 WARN [master/cb28580903fb:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
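The ZKUtil/ZKWatcher entries above show the standard ZooKeeper idiom behind master registration: set a watch on a znode that may not exist yet via exists(), then react to NodeCreated/NodeDeleted/NodeChildrenChanged events as they arrive. The sketch below illustrates that idiom with the plain Apache ZooKeeper client rather than HBase's own ZKWatcher; the ensemble address 127.0.0.1:58448 and the /hbase/master path are taken from the log, while the class name, timeouts and printouts are purely illustrative.

import java.util.concurrent.CountDownLatch;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

public class ZNodeWatchSketch {
  public static void main(String[] args) throws Exception {
    CountDownLatch connected = new CountDownLatch(1);
    // Session watcher: fires once the client reaches SyncConnected,
    // mirroring the "state=SyncConnected, path=null" events in the log.
    ZooKeeper zk = new ZooKeeper("127.0.0.1:58448", 30000, event -> {
      if (event.getState() == Watcher.Event.KeeperState.SyncConnected) {
        connected.countDown();
      }
    });
    connected.await();

    // Watch a znode that may not exist yet ("Set watcher on znode that does
    // not yet exist" in the log). exists() registers the watch either way.
    Watcher masterWatcher = new Watcher() {
      @Override
      public void process(WatchedEvent event) {
        // NodeCreated => an active master registered; NodeDeleted => it went away.
        System.out.println("event " + event.getType() + " on " + event.getPath());
        try {
          // ZooKeeper watches are one-shot: re-register to keep observing.
          zk.exists("/hbase/master", this);
        } catch (Exception e) {
          e.printStackTrace();
        }
      }
    };
    zk.exists("/hbase/master", masterWatcher);

    Thread.sleep(60_000); // keep the session alive long enough to observe events
    zk.close();
  }
}

Because watches are one-shot, the callback re-registers itself after each event; that is the mechanic behind the repeated "Received ZooKeeper Event ... Set watcher on ..." pairs in the log.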
2024-11-13T00:30:45,301 INFO [master/cb28580903fb:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=cb28580903fb,33675,1731457845226 2024-11-13T00:30:45,305 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:39533/user/jenkins/test-data/f0421069-58d7-d4ce-1e56-b679fee0e922/hbase.id] with ID: d2c9ab16-8e9e-41d2-8037-8f7b49485dd8 2024-11-13T00:30:45,305 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:39533/user/jenkins/test-data/f0421069-58d7-d4ce-1e56-b679fee0e922/.tmp/hbase.id 2024-11-13T00:30:45,310 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46085 is added to blk_1073741826_1002 (size=42) 2024-11-13T00:30:45,310 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38553 is added to blk_1073741826_1002 (size=42) 2024-11-13T00:30:45,310 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:39533/user/jenkins/test-data/f0421069-58d7-d4ce-1e56-b679fee0e922/.tmp/hbase.id]:[hdfs://localhost:39533/user/jenkins/test-data/f0421069-58d7-d4ce-1e56-b679fee0e922/hbase.id] 2024-11-13T00:30:45,320 INFO [master/cb28580903fb:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T00:30:45,320 INFO [master/cb28580903fb:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-13T00:30:45,321 INFO [master/cb28580903fb:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
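The cluster ID lines above (write the ID to .tmp/hbase.id, then move it to hbase.id) follow the common write-to-a-temporary-path-then-rename pattern, which prevents readers from ever observing a half-written file. Below is a rough sketch of that pattern against the Hadoop FileSystem API; the root directory and UUID are copied from the log, and the helper itself is illustrative, not HBase's FSUtils code.

import java.io.IOException;
import java.nio.charset.StandardCharsets;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ClusterIdFileSketch {
  /** Write content to a temporary path, then rename it over the final path. */
  static void writeViaTemp(FileSystem fs, Path finalPath, String content) throws IOException {
    Path tmp = new Path(finalPath.getParent(), ".tmp/" + finalPath.getName());
    try (FSDataOutputStream out = fs.create(tmp, true)) {
      out.write(content.getBytes(StandardCharsets.UTF_8));
    }
    // On HDFS, rename is atomic, so readers see either no file or a complete one.
    if (!fs.rename(tmp, finalPath)) {
      throw new IOException("rename " + tmp + " -> " + finalPath + " failed");
    }
  }

  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Root directory and cluster ID as they appear in the log; adjust for a real cluster.
    Path root = new Path(
        "hdfs://localhost:39533/user/jenkins/test-data/f0421069-58d7-d4ce-1e56-b679fee0e922");
    FileSystem fs = root.getFileSystem(conf);
    writeViaTemp(fs, new Path(root, "hbase.id"), "d2c9ab16-8e9e-41d2-8037-8f7b49485dd8");
  }
}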
2024-11-13T00:30:45,324 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35993-0x1010ebbf26d0001, quorum=127.0.0.1:58448, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T00:30:45,324 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33675-0x1010ebbf26d0000, quorum=127.0.0.1:58448, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T00:30:45,329 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38553 is added to blk_1073741827_1003 (size=196) 2024-11-13T00:30:45,329 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46085 is added to blk_1073741827_1003 (size=196) 2024-11-13T00:30:45,330 INFO [master/cb28580903fb:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-13T00:30:45,331 INFO [master/cb28580903fb:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-13T00:30:45,331 INFO [master/cb28580903fb:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-13T00:30:45,336 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38553 is added to blk_1073741828_1004 (size=1189) 2024-11-13T00:30:45,337 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46085 is added to blk_1073741828_1004 (size=1189) 2024-11-13T00:30:45,337 INFO [master/cb28580903fb:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:39533/user/jenkins/test-data/f0421069-58d7-d4ce-1e56-b679fee0e922/MasterData/data/master/store 2024-11-13T00:30:45,344 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38553 is added to blk_1073741829_1005 (size=34) 2024-11-13T00:30:45,344 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46085 is added to blk_1073741829_1005 (size=34) 2024-11-13T00:30:45,345 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T00:30:45,345 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-13T00:30:45,345 INFO [master/cb28580903fb:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-13T00:30:45,345 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-13T00:30:45,345 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-13T00:30:45,345 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-13T00:30:45,345 INFO [master/cb28580903fb:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
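Editor's note: the table descriptor dumped above (families info/proc/rs/state with VERSIONS, IN_MEMORY, BLOOMFILTER, BLOCKSIZE, DATA_BLOCK_ENCODING attributes) maps directly onto the public HBase descriptor builders. A hedged sketch of describing a family like 'info' that way is below; the table name is illustrative, since 'master:store' itself is internal and not created through this client API:

```java
// Sketch: express the 'info' family attributes printed above with the client builders.
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class DescriptorExample {
  public static TableDescriptor build() {
    ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("info"))
        .setMaxVersions(3)                                   // VERSIONS => '3'
        .setInMemory(true)                                   // IN_MEMORY => 'true'
        .setBloomFilterType(BloomType.ROWCOL)                // BLOOMFILTER => 'ROWCOL'
        .setBlocksize(8 * 1024)                              // BLOCKSIZE => '8192 B (8KB)'
        .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
        .build();
    return TableDescriptorBuilder
        .newBuilder(TableName.valueOf("example_table"))      // illustrative table name
        .setColumnFamily(info)
        .build();
  }
}
```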
2024-11-13T00:30:45,345 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731457845345Disabling compacts and flushes for region at 1731457845345Disabling writes for close at 1731457845345Writing region close event to WAL at 1731457845345Closed at 1731457845345 2024-11-13T00:30:45,346 WARN [master/cb28580903fb:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:39533/user/jenkins/test-data/f0421069-58d7-d4ce-1e56-b679fee0e922/MasterData/data/master/store/.initializing 2024-11-13T00:30:45,346 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:39533/user/jenkins/test-data/f0421069-58d7-d4ce-1e56-b679fee0e922/MasterData/WALs/cb28580903fb,33675,1731457845226 2024-11-13T00:30:45,348 INFO [master/cb28580903fb:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=cb28580903fb%2C33675%2C1731457845226, suffix=, logDir=hdfs://localhost:39533/user/jenkins/test-data/f0421069-58d7-d4ce-1e56-b679fee0e922/MasterData/WALs/cb28580903fb,33675,1731457845226, archiveDir=hdfs://localhost:39533/user/jenkins/test-data/f0421069-58d7-d4ce-1e56-b679fee0e922/MasterData/oldWALs, maxLogs=10 2024-11-13T00:30:45,348 INFO [master/cb28580903fb:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor cb28580903fb%2C33675%2C1731457845226.1731457845348 2024-11-13T00:30:45,352 INFO [master/cb28580903fb:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/f0421069-58d7-d4ce-1e56-b679fee0e922/MasterData/WALs/cb28580903fb,33675,1731457845226/cb28580903fb%2C33675%2C1731457845226.1731457845348 2024-11-13T00:30:45,355 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41235:41235),(127.0.0.1/127.0.0.1:41921:41921)] 2024-11-13T00:30:45,357 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-13T00:30:45,357 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T00:30:45,357 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-13T00:30:45,357 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-13T00:30:45,358 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-13T00:30:45,359 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-13T00:30:45,359 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T00:30:45,360 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T00:30:45,360 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-13T00:30:45,361 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-13T00:30:45,361 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T00:30:45,361 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T00:30:45,361 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-13T00:30:45,362 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-13T00:30:45,362 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T00:30:45,363 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T00:30:45,363 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-13T00:30:45,364 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-13T00:30:45,364 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T00:30:45,364 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T00:30:45,364 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-13T00:30:45,365 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39533/user/jenkins/test-data/f0421069-58d7-d4ce-1e56-b679fee0e922/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-13T00:30:45,365 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39533/user/jenkins/test-data/f0421069-58d7-d4ce-1e56-b679fee0e922/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-13T00:30:45,366 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-13T00:30:45,366 DEBUG [master/cb28580903fb:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-13T00:30:45,367 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-13T00:30:45,368 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-13T00:30:45,370 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39533/user/jenkins/test-data/f0421069-58d7-d4ce-1e56-b679fee0e922/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-13T00:30:45,370 INFO [master/cb28580903fb:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=842850, jitterRate=0.07174032926559448}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-13T00:30:45,371 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731457845357Initializing all the Stores at 1731457845358 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731457845358Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731457845358Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731457845358Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731457845358Cleaning up temporary data from old regions at 1731457845366 (+8 ms)Region opened successfully at 1731457845371 (+5 ms) 2024-11-13T00:30:45,371 INFO [master/cb28580903fb:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-13T00:30:45,374 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@71da4bf5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=cb28580903fb/172.17.0.2:0 2024-11-13T00:30:45,375 INFO [master/cb28580903fb:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-13T00:30:45,375 INFO [master/cb28580903fb:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-13T00:30:45,375 INFO [master/cb28580903fb:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-13T00:30:45,375 INFO [master/cb28580903fb:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-13T00:30:45,375 INFO [master/cb28580903fb:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-13T00:30:45,376 INFO [master/cb28580903fb:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-13T00:30:45,376 INFO [master/cb28580903fb:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-13T00:30:45,377 INFO [master/cb28580903fb:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-13T00:30:45,378 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33675-0x1010ebbf26d0000, quorum=127.0.0.1:58448, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-13T00:30:45,379 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-13T00:30:45,379 INFO [master/cb28580903fb:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-13T00:30:45,380 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33675-0x1010ebbf26d0000, quorum=127.0.0.1:58448, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-13T00:30:45,381 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-13T00:30:45,381 INFO [master/cb28580903fb:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-13T00:30:45,382 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33675-0x1010ebbf26d0000, quorum=127.0.0.1:58448, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-13T00:30:45,383 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-13T00:30:45,384 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33675-0x1010ebbf26d0000, quorum=127.0.0.1:58448, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-13T00:30:45,387 DEBUG 
[master/cb28580903fb:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-13T00:30:45,388 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33675-0x1010ebbf26d0000, quorum=127.0.0.1:58448, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-13T00:30:45,389 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-13T00:30:45,391 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35993-0x1010ebbf26d0001, quorum=127.0.0.1:58448, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-13T00:30:45,391 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33675-0x1010ebbf26d0000, quorum=127.0.0.1:58448, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-13T00:30:45,391 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35993-0x1010ebbf26d0001, quorum=127.0.0.1:58448, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T00:30:45,391 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33675-0x1010ebbf26d0000, quorum=127.0.0.1:58448, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T00:30:45,391 INFO [master/cb28580903fb:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=cb28580903fb,33675,1731457845226, sessionid=0x1010ebbf26d0000, setting cluster-up flag (Was=false) 2024-11-13T00:30:45,395 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35993-0x1010ebbf26d0001, quorum=127.0.0.1:58448, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T00:30:45,395 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33675-0x1010ebbf26d0000, quorum=127.0.0.1:58448, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T00:30:45,399 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-13T00:30:45,400 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=cb28580903fb,33675,1731457845226 2024-11-13T00:30:45,404 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33675-0x1010ebbf26d0000, quorum=127.0.0.1:58448, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T00:30:45,404 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35993-0x1010ebbf26d0001, quorum=127.0.0.1:58448, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T00:30:45,409 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-13T00:30:45,410 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=cb28580903fb,33675,1731457845226 2024-11-13T00:30:45,411 INFO [master/cb28580903fb:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:39533/user/jenkins/test-data/f0421069-58d7-d4ce-1e56-b679fee0e922/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-13T00:30:45,413 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-13T00:30:45,413 INFO [master/cb28580903fb:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-13T00:30:45,413 INFO [master/cb28580903fb:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-13T00:30:45,414 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: cb28580903fb,33675,1731457845226 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-13T00:30:45,415 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/cb28580903fb:0, corePoolSize=5, maxPoolSize=5 2024-11-13T00:30:45,415 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/cb28580903fb:0, corePoolSize=5, maxPoolSize=5 2024-11-13T00:30:45,415 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/cb28580903fb:0, corePoolSize=5, maxPoolSize=5 2024-11-13T00:30:45,415 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/cb28580903fb:0, corePoolSize=5, maxPoolSize=5 2024-11-13T00:30:45,415 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/cb28580903fb:0, corePoolSize=10, maxPoolSize=10 2024-11-13T00:30:45,415 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/cb28580903fb:0, corePoolSize=1, maxPoolSize=1 2024-11-13T00:30:45,415 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/cb28580903fb:0, corePoolSize=2, maxPoolSize=2 2024-11-13T00:30:45,415 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/cb28580903fb:0, corePoolSize=1, 
maxPoolSize=1 2024-11-13T00:30:45,416 INFO [master/cb28580903fb:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731457875416 2024-11-13T00:30:45,416 INFO [master/cb28580903fb:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-13T00:30:45,416 INFO [master/cb28580903fb:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-13T00:30:45,416 INFO [master/cb28580903fb:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-13T00:30:45,416 INFO [master/cb28580903fb:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-13T00:30:45,416 INFO [master/cb28580903fb:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-13T00:30:45,416 INFO [master/cb28580903fb:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-13T00:30:45,417 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-13T00:30:45,417 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-13T00:30:45,418 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T00:30:45,419 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-13T00:30:45,420 INFO [master/cb28580903fb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore 
ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-13T00:30:45,421 INFO [master/cb28580903fb:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-13T00:30:45,421 INFO [master/cb28580903fb:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-13T00:30:45,421 INFO [master/cb28580903fb:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-13T00:30:45,421 INFO [master/cb28580903fb:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-13T00:30:45,421 INFO [master/cb28580903fb:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-13T00:30:45,422 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/cb28580903fb:0:becomeActiveMaster-HFileCleaner.large.0-1731457845421,5,FailOnTimeoutGroup] 2024-11-13T00:30:45,422 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/cb28580903fb:0:becomeActiveMaster-HFileCleaner.small.0-1731457845422,5,FailOnTimeoutGroup] 2024-11-13T00:30:45,422 INFO [master/cb28580903fb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-13T00:30:45,422 INFO [master/cb28580903fb:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-13T00:30:45,422 INFO [master/cb28580903fb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-13T00:30:45,422 INFO [master/cb28580903fb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
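Editor's note: the "Chore ScheduledChore name=..., period=..., unit=MILLISECONDS is enabled" lines above amount to named periodic background tasks. A JDK-only sketch of the same scheduling shape follows; the chore name, period, and placeholder work are examples, not HBase's cleaner implementation:

```java
// Sketch: a named periodic task, analogous to the LogsCleaner chore enabled above.
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

public class ChoreExample {
  public static void main(String[] args) throws InterruptedException {
    ScheduledExecutorService pool = Executors.newSingleThreadScheduledExecutor(r -> {
      Thread t = new Thread(r, "LogsCleaner-example"); // name mirrors the chore name
      t.setDaemon(true);
      return t;
    });
    // period=600000 ms, matching the LogsCleaner period in the log
    pool.scheduleAtFixedRate(
        () -> System.out.println("chore tick: scan and delete expired WALs (placeholder)"),
        0, 600_000, TimeUnit.MILLISECONDS);
    Thread.sleep(5_000); // keep the JVM alive long enough for the first tick, then exit
  }
}
```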
2024-11-13T00:30:45,425 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46085 is added to blk_1073741831_1007 (size=1321) 2024-11-13T00:30:45,425 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38553 is added to blk_1073741831_1007 (size=1321) 2024-11-13T00:30:45,426 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:39533/user/jenkins/test-data/f0421069-58d7-d4ce-1e56-b679fee0e922/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-13T00:30:45,427 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:39533/user/jenkins/test-data/f0421069-58d7-d4ce-1e56-b679fee0e922 2024-11-13T00:30:45,437 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38553 is added to blk_1073741832_1008 (size=32) 2024-11-13T00:30:45,437 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46085 is added to blk_1073741832_1008 (size=32) 2024-11-13T00:30:45,438 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T00:30:45,440 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-13T00:30:45,442 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; 
off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-13T00:30:45,442 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T00:30:45,443 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T00:30:45,443 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-13T00:30:45,444 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-13T00:30:45,444 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T00:30:45,444 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T00:30:45,445 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-13T00:30:45,446 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-13T00:30:45,446 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T00:30:45,446 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T00:30:45,446 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-13T00:30:45,447 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-13T00:30:45,447 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T00:30:45,448 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T00:30:45,448 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-13T00:30:45,448 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39533/user/jenkins/test-data/f0421069-58d7-d4ce-1e56-b679fee0e922/data/hbase/meta/1588230740 2024-11-13T00:30:45,449 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39533/user/jenkins/test-data/f0421069-58d7-d4ce-1e56-b679fee0e922/data/hbase/meta/1588230740 2024-11-13T00:30:45,450 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-13T00:30:45,450 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-13T00:30:45,450 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
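Editor's note: the FlushLargeStoresPolicy messages above state that, with no per-column-family lower bound configured, the policy uses the region memstore flush size divided by the number of families. A quick hedged check of that arithmetic: the 134217728-byte flush size logged for master:store over its four families gives the printed 32.0 M, and a 64 MB flush size (an assumption, consistent with the output) over the four hbase:meta families gives 16.0 M:

```java
// Arithmetic check only; the 64 MB meta flush size is an assumption labeled as such.
public class FlushLowerBoundCheck {
  public static void main(String[] args) {
    long masterStoreFlushSize = 134_217_728L;   // from "Injected flushSize=134217728"
    long metaFlushSize = 64L * 1024 * 1024;     // assumed, consistent with "16.0 M"
    int families = 4;                           // info/proc/rs/state, or info/ns/rep_barrier/table
    System.out.println(masterStoreFlushSize / families / (1024 * 1024) + " MB"); // 32 MB
    System.out.println(metaFlushSize / families / (1024 * 1024) + " MB");        // 16 MB
  }
}
```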
2024-11-13T00:30:45,451 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-13T00:30:45,453 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39533/user/jenkins/test-data/f0421069-58d7-d4ce-1e56-b679fee0e922/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-13T00:30:45,454 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=741248, jitterRate=-0.057455047965049744}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-13T00:30:45,454 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731457845438Initializing all the Stores at 1731457845438Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731457845438Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731457845440 (+2 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731457845440Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731457845440Cleaning up temporary data from old regions at 1731457845450 (+10 ms)Region opened successfully at 1731457845454 (+4 ms) 2024-11-13T00:30:45,454 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-13T00:30:45,454 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-13T00:30:45,454 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-13T00:30:45,454 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-13T00:30:45,454 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-13T00:30:45,455 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-13T00:30:45,455 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731457845454Disabling compacts and flushes for region at 1731457845454Disabling writes for close at 1731457845454Writing region 
close event to WAL at 1731457845455 (+1 ms)Closed at 1731457845455 2024-11-13T00:30:45,456 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-13T00:30:45,456 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-13T00:30:45,457 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-13T00:30:45,458 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-13T00:30:45,459 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-13T00:30:45,486 INFO [RS:0;cb28580903fb:35993 {}] regionserver.HRegionServer(746): ClusterId : d2c9ab16-8e9e-41d2-8037-8f7b49485dd8 2024-11-13T00:30:45,486 DEBUG [RS:0;cb28580903fb:35993 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-13T00:30:45,488 DEBUG [RS:0;cb28580903fb:35993 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-13T00:30:45,488 DEBUG [RS:0;cb28580903fb:35993 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-13T00:30:45,490 DEBUG [RS:0;cb28580903fb:35993 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-13T00:30:45,490 DEBUG [RS:0;cb28580903fb:35993 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@773302d2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=cb28580903fb/172.17.0.2:0 2024-11-13T00:30:45,502 DEBUG [RS:0;cb28580903fb:35993 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;cb28580903fb:35993 2024-11-13T00:30:45,502 INFO [RS:0;cb28580903fb:35993 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-13T00:30:45,502 INFO [RS:0;cb28580903fb:35993 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-13T00:30:45,502 DEBUG [RS:0;cb28580903fb:35993 {}] regionserver.HRegionServer(832): About to register with Master. 
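Editor's note: the "Installed shutdown hook thread: Shutdownhook:RS:0;..." entry above is the standard JVM shutdown-hook pattern, which gives the process a chance to run cleanup when it exits. A JDK-only sketch, with a placeholder hook name and placeholder cleanup action:

```java
// Sketch: register a named shutdown hook; it runs when main returns or on SIGTERM.
public class ShutdownHookExample {
  public static void main(String[] args) {
    Runtime.getRuntime().addShutdownHook(new Thread(
        () -> System.out.println("shutting down: close WALs, flush, release znode (placeholder)"),
        "Shutdownhook:example-regionserver"));
    System.out.println("serving...");
    // Hook executes during JVM shutdown, after this method returns.
  }
}
```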
2024-11-13T00:30:45,503 INFO [RS:0;cb28580903fb:35993 {}] regionserver.HRegionServer(2659): reportForDuty to master=cb28580903fb,33675,1731457845226 with port=35993, startcode=1731457845270 2024-11-13T00:30:45,503 DEBUG [RS:0;cb28580903fb:35993 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-13T00:30:45,505 INFO [HMaster-EventLoopGroup-12-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43837, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.5 (auth:SIMPLE), service=RegionServerStatusService 2024-11-13T00:30:45,506 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33675 {}] master.ServerManager(363): Checking decommissioned status of RegionServer cb28580903fb,35993,1731457845270 2024-11-13T00:30:45,506 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33675 {}] master.ServerManager(517): Registering regionserver=cb28580903fb,35993,1731457845270 2024-11-13T00:30:45,507 DEBUG [RS:0;cb28580903fb:35993 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:39533/user/jenkins/test-data/f0421069-58d7-d4ce-1e56-b679fee0e922 2024-11-13T00:30:45,507 DEBUG [RS:0;cb28580903fb:35993 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:39533 2024-11-13T00:30:45,507 DEBUG [RS:0;cb28580903fb:35993 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-13T00:30:45,509 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33675-0x1010ebbf26d0000, quorum=127.0.0.1:58448, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-13T00:30:45,509 DEBUG [RS:0;cb28580903fb:35993 {}] zookeeper.ZKUtil(111): regionserver:35993-0x1010ebbf26d0001, quorum=127.0.0.1:58448, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/cb28580903fb,35993,1731457845270 2024-11-13T00:30:45,509 WARN [RS:0;cb28580903fb:35993 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-13T00:30:45,510 INFO [RS:0;cb28580903fb:35993 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-13T00:30:45,510 DEBUG [RS:0;cb28580903fb:35993 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:39533/user/jenkins/test-data/f0421069-58d7-d4ce-1e56-b679fee0e922/WALs/cb28580903fb,35993,1731457845270 2024-11-13T00:30:45,510 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [cb28580903fb,35993,1731457845270] 2024-11-13T00:30:45,513 INFO [RS:0;cb28580903fb:35993 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-13T00:30:45,514 INFO [RS:0;cb28580903fb:35993 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-13T00:30:45,515 INFO [RS:0;cb28580903fb:35993 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-13T00:30:45,515 INFO [RS:0;cb28580903fb:35993 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
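Editor's note: the MemStoreFlusher line above reports globalMemStoreLimit=880 M with globalMemStoreLimitLowMark=836 M. Those numbers are consistent with a 0.95 lower-limit factor applied to the global limit; the 0.95 factor is an assumption for illustration, not something this log states:

```java
// Arithmetic check only; the 0.95 factor is assumed.
public class MemStoreLimits {
  public static void main(String[] args) {
    long globalLimitMb = 880;
    double lowerLimitFactor = 0.95;                       // assumed factor
    long lowMarkMb = (long) (globalLimitMb * lowerLimitFactor);
    System.out.println("low mark = " + lowMarkMb + " M"); // prints 836 M
  }
}
```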
2024-11-13T00:30:45,516 INFO [RS:0;cb28580903fb:35993 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-13T00:30:45,517 INFO [RS:0;cb28580903fb:35993 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-13T00:30:45,517 INFO [RS:0;cb28580903fb:35993 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-13T00:30:45,517 DEBUG [RS:0;cb28580903fb:35993 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/cb28580903fb:0, corePoolSize=1, maxPoolSize=1 2024-11-13T00:30:45,517 DEBUG [RS:0;cb28580903fb:35993 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/cb28580903fb:0, corePoolSize=1, maxPoolSize=1 2024-11-13T00:30:45,517 DEBUG [RS:0;cb28580903fb:35993 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/cb28580903fb:0, corePoolSize=1, maxPoolSize=1 2024-11-13T00:30:45,517 DEBUG [RS:0;cb28580903fb:35993 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/cb28580903fb:0, corePoolSize=1, maxPoolSize=1 2024-11-13T00:30:45,517 DEBUG [RS:0;cb28580903fb:35993 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/cb28580903fb:0, corePoolSize=1, maxPoolSize=1 2024-11-13T00:30:45,517 DEBUG [RS:0;cb28580903fb:35993 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/cb28580903fb:0, corePoolSize=2, maxPoolSize=2 2024-11-13T00:30:45,517 DEBUG [RS:0;cb28580903fb:35993 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/cb28580903fb:0, corePoolSize=1, maxPoolSize=1 2024-11-13T00:30:45,517 DEBUG [RS:0;cb28580903fb:35993 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/cb28580903fb:0, corePoolSize=1, maxPoolSize=1 2024-11-13T00:30:45,517 DEBUG [RS:0;cb28580903fb:35993 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/cb28580903fb:0, corePoolSize=1, maxPoolSize=1 2024-11-13T00:30:45,517 DEBUG [RS:0;cb28580903fb:35993 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/cb28580903fb:0, corePoolSize=1, maxPoolSize=1 2024-11-13T00:30:45,517 DEBUG [RS:0;cb28580903fb:35993 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/cb28580903fb:0, corePoolSize=1, maxPoolSize=1 2024-11-13T00:30:45,517 DEBUG [RS:0;cb28580903fb:35993 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/cb28580903fb:0, corePoolSize=1, maxPoolSize=1 2024-11-13T00:30:45,517 DEBUG [RS:0;cb28580903fb:35993 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/cb28580903fb:0, corePoolSize=3, maxPoolSize=3 2024-11-13T00:30:45,517 DEBUG [RS:0;cb28580903fb:35993 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/cb28580903fb:0, corePoolSize=3, maxPoolSize=3 2024-11-13T00:30:45,518 INFO [RS:0;cb28580903fb:35993 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-11-13T00:30:45,518 INFO [RS:0;cb28580903fb:35993 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-13T00:30:45,518 INFO [RS:0;cb28580903fb:35993 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-13T00:30:45,518 INFO [RS:0;cb28580903fb:35993 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-13T00:30:45,518 INFO [RS:0;cb28580903fb:35993 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-13T00:30:45,518 INFO [RS:0;cb28580903fb:35993 {}] hbase.ChoreService(168): Chore ScheduledChore name=cb28580903fb,35993,1731457845270-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-13T00:30:45,533 INFO [RS:0;cb28580903fb:35993 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-13T00:30:45,533 INFO [RS:0;cb28580903fb:35993 {}] hbase.ChoreService(168): Chore ScheduledChore name=cb28580903fb,35993,1731457845270-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-13T00:30:45,533 INFO [RS:0;cb28580903fb:35993 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-13T00:30:45,533 INFO [RS:0;cb28580903fb:35993 {}] regionserver.Replication(171): cb28580903fb,35993,1731457845270 started 2024-11-13T00:30:45,547 INFO [RS:0;cb28580903fb:35993 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-13T00:30:45,547 INFO [RS:0;cb28580903fb:35993 {}] regionserver.HRegionServer(1482): Serving as cb28580903fb,35993,1731457845270, RpcServer on cb28580903fb/172.17.0.2:35993, sessionid=0x1010ebbf26d0001 2024-11-13T00:30:45,547 DEBUG [RS:0;cb28580903fb:35993 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-13T00:30:45,547 DEBUG [RS:0;cb28580903fb:35993 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager cb28580903fb,35993,1731457845270 2024-11-13T00:30:45,547 DEBUG [RS:0;cb28580903fb:35993 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'cb28580903fb,35993,1731457845270' 2024-11-13T00:30:45,547 DEBUG [RS:0;cb28580903fb:35993 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-13T00:30:45,548 DEBUG [RS:0;cb28580903fb:35993 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-13T00:30:45,548 DEBUG [RS:0;cb28580903fb:35993 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-13T00:30:45,548 DEBUG [RS:0;cb28580903fb:35993 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-13T00:30:45,548 DEBUG [RS:0;cb28580903fb:35993 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager cb28580903fb,35993,1731457845270 2024-11-13T00:30:45,548 DEBUG [RS:0;cb28580903fb:35993 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'cb28580903fb,35993,1731457845270' 2024-11-13T00:30:45,548 DEBUG [RS:0;cb28580903fb:35993 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-13T00:30:45,549 DEBUG 
[RS:0;cb28580903fb:35993 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-13T00:30:45,549 DEBUG [RS:0;cb28580903fb:35993 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-13T00:30:45,549 INFO [RS:0;cb28580903fb:35993 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-13T00:30:45,549 INFO [RS:0;cb28580903fb:35993 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-13T00:30:45,610 WARN [cb28580903fb:33675 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 2024-11-13T00:30:45,651 INFO [RS:0;cb28580903fb:35993 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=cb28580903fb%2C35993%2C1731457845270, suffix=, logDir=hdfs://localhost:39533/user/jenkins/test-data/f0421069-58d7-d4ce-1e56-b679fee0e922/WALs/cb28580903fb,35993,1731457845270, archiveDir=hdfs://localhost:39533/user/jenkins/test-data/f0421069-58d7-d4ce-1e56-b679fee0e922/oldWALs, maxLogs=32 2024-11-13T00:30:45,651 INFO [RS:0;cb28580903fb:35993 {}] monitor.StreamSlowMonitor(122): New stream slow monitor cb28580903fb%2C35993%2C1731457845270.1731457845651 2024-11-13T00:30:45,657 INFO [RS:0;cb28580903fb:35993 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/f0421069-58d7-d4ce-1e56-b679fee0e922/WALs/cb28580903fb,35993,1731457845270/cb28580903fb%2C35993%2C1731457845270.1731457845651 2024-11-13T00:30:45,658 DEBUG [RS:0;cb28580903fb:35993 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41235:41235),(127.0.0.1/127.0.0.1:41921:41921)] 2024-11-13T00:30:45,690 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/MasterData/WALs/cb28580903fb,32917,1731457759225/cb28580903fb%2C32917%2C1731457759225.1731457759520 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:30:45,860 DEBUG [cb28580903fb:33675 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-13T00:30:45,860 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=cb28580903fb,35993,1731457845270 2024-11-13T00:30:45,862 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as cb28580903fb,35993,1731457845270, state=OPENING 2024-11-13T00:30:45,864 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-13T00:30:45,865 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35993-0x1010ebbf26d0001, quorum=127.0.0.1:58448, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T00:30:45,865 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33675-0x1010ebbf26d0000, quorum=127.0.0.1:58448, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T00:30:45,866 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-13T00:30:45,866 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-13T00:30:45,866 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-13T00:30:45,866 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=cb28580903fb,35993,1731457845270}] 2024-11-13T00:30:45,954 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,35015,1731457760593/cb28580903fb%2C35015%2C1731457760593.1731457760827 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:30:45,954 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,41233,1731457759323/cb28580903fb%2C41233%2C1731457759323.meta.1731457760430.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:30:46,019 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-13T00:30:46,021 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55255, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-13T00:30:46,024 INFO [RS_OPEN_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-13T00:30:46,024 INFO [RS_OPEN_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-13T00:30:46,026 INFO [RS_OPEN_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=cb28580903fb%2C35993%2C1731457845270.meta, suffix=.meta, logDir=hdfs://localhost:39533/user/jenkins/test-data/f0421069-58d7-d4ce-1e56-b679fee0e922/WALs/cb28580903fb,35993,1731457845270, archiveDir=hdfs://localhost:39533/user/jenkins/test-data/f0421069-58d7-d4ce-1e56-b679fee0e922/oldWALs, maxLogs=32 2024-11-13T00:30:46,026 INFO [RS_OPEN_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor cb28580903fb%2C35993%2C1731457845270.meta.1731457846026.meta 2024-11-13T00:30:46,031 INFO [RS_OPEN_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/f0421069-58d7-d4ce-1e56-b679fee0e922/WALs/cb28580903fb,35993,1731457845270/cb28580903fb%2C35993%2C1731457845270.meta.1731457846026.meta 2024-11-13T00:30:46,032 DEBUG [RS_OPEN_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41921:41921),(127.0.0.1/127.0.0.1:41235:41235)] 2024-11-13T00:30:46,035 DEBUG [RS_OPEN_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-13T00:30:46,035 DEBUG [RS_OPEN_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-13T00:30:46,035 DEBUG [RS_OPEN_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-13T00:30:46,035 INFO [RS_OPEN_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-11-13T00:30:46,035 DEBUG [RS_OPEN_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-13T00:30:46,035 DEBUG [RS_OPEN_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T00:30:46,035 DEBUG [RS_OPEN_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-13T00:30:46,035 DEBUG [RS_OPEN_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-13T00:30:46,037 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-13T00:30:46,038 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-13T00:30:46,038 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T00:30:46,038 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T00:30:46,038 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-13T00:30:46,039 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-13T00:30:46,039 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T00:30:46,039 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T00:30:46,039 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-13T00:30:46,040 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-13T00:30:46,040 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T00:30:46,040 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T00:30:46,040 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-13T00:30:46,041 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-13T00:30:46,041 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T00:30:46,041 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-11-13T00:30:46,041 DEBUG [RS_OPEN_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-13T00:30:46,042 DEBUG [RS_OPEN_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39533/user/jenkins/test-data/f0421069-58d7-d4ce-1e56-b679fee0e922/data/hbase/meta/1588230740 2024-11-13T00:30:46,043 DEBUG [RS_OPEN_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39533/user/jenkins/test-data/f0421069-58d7-d4ce-1e56-b679fee0e922/data/hbase/meta/1588230740 2024-11-13T00:30:46,044 DEBUG [RS_OPEN_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-13T00:30:46,044 DEBUG [RS_OPEN_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-13T00:30:46,044 DEBUG [RS_OPEN_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-13T00:30:46,045 DEBUG [RS_OPEN_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-13T00:30:46,046 INFO [RS_OPEN_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=727254, jitterRate=-0.07524929940700531}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-13T00:30:46,046 DEBUG [RS_OPEN_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-13T00:30:46,046 DEBUG [RS_OPEN_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731457846035Writing region info on filesystem at 1731457846035Initializing all the Stores at 1731457846036 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731457846036Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731457846037 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731457846037Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731457846037Cleaning up temporary data from old regions at 1731457846044 (+7 ms)Running coprocessor post-open hooks at 1731457846046 (+2 ms)Region opened successfully at 1731457846046 2024-11-13T00:30:46,047 INFO [RS_OPEN_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731457846019 2024-11-13T00:30:46,050 DEBUG [RS_OPEN_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-13T00:30:46,050 INFO [RS_OPEN_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-13T00:30:46,051 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=cb28580903fb,35993,1731457845270 2024-11-13T00:30:46,051 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as cb28580903fb,35993,1731457845270, state=OPEN 2024-11-13T00:30:46,057 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35993-0x1010ebbf26d0001, quorum=127.0.0.1:58448, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-13T00:30:46,057 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33675-0x1010ebbf26d0000, quorum=127.0.0.1:58448, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-13T00:30:46,057 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=cb28580903fb,35993,1731457845270 2024-11-13T00:30:46,057 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-13T00:30:46,057 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-13T00:30:46,060 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-13T00:30:46,060 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=cb28580903fb,35993,1731457845270 in 191 msec 2024-11-13T00:30:46,062 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-13T00:30:46,062 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 604 msec 2024-11-13T00:30:46,063 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): 
Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-13T00:30:46,063 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-13T00:30:46,064 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-13T00:30:46,064 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=cb28580903fb,35993,1731457845270, seqNum=-1] 2024-11-13T00:30:46,065 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-13T00:30:46,066 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55279, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-13T00:30:46,071 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 658 msec 2024-11-13T00:30:46,071 INFO [master/cb28580903fb:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731457846071, completionTime=-1 2024-11-13T00:30:46,071 INFO [master/cb28580903fb:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-13T00:30:46,071 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-13T00:30:46,072 INFO [master/cb28580903fb:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-13T00:30:46,072 INFO [master/cb28580903fb:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731457906072 2024-11-13T00:30:46,072 INFO [master/cb28580903fb:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731457966072 2024-11-13T00:30:46,072 INFO [master/cb28580903fb:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 1 msec 2024-11-13T00:30:46,073 INFO [master/cb28580903fb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=cb28580903fb,33675,1731457845226-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-13T00:30:46,073 INFO [master/cb28580903fb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=cb28580903fb,33675,1731457845226-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-13T00:30:46,073 INFO [master/cb28580903fb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=cb28580903fb,33675,1731457845226-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-13T00:30:46,073 INFO [master/cb28580903fb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-cb28580903fb:33675, period=300000, unit=MILLISECONDS is enabled. 
2024-11-13T00:30:46,073 INFO [master/cb28580903fb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-13T00:30:46,073 INFO [master/cb28580903fb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-13T00:30:46,074 DEBUG [master/cb28580903fb:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-13T00:30:46,076 INFO [master/cb28580903fb:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.775sec 2024-11-13T00:30:46,076 INFO [master/cb28580903fb:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-13T00:30:46,076 INFO [master/cb28580903fb:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-13T00:30:46,076 INFO [master/cb28580903fb:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-13T00:30:46,076 INFO [master/cb28580903fb:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-13T00:30:46,076 INFO [master/cb28580903fb:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-13T00:30:46,076 INFO [master/cb28580903fb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=cb28580903fb,33675,1731457845226-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-13T00:30:46,076 INFO [master/cb28580903fb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=cb28580903fb,33675,1731457845226-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-13T00:30:46,078 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-13T00:30:46,078 INFO [master/cb28580903fb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-13T00:30:46,078 INFO [master/cb28580903fb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=cb28580903fb,33675,1731457845226-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-13T00:30:46,086 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@52ab3a78, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T00:30:46,086 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request cb28580903fb,33675,-1 for getting cluster id 2024-11-13T00:30:46,086 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-13T00:30:46,088 DEBUG [HMaster-EventLoopGroup-12-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'd2c9ab16-8e9e-41d2-8037-8f7b49485dd8' 2024-11-13T00:30:46,088 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-13T00:30:46,088 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "d2c9ab16-8e9e-41d2-8037-8f7b49485dd8" 2024-11-13T00:30:46,088 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4e05d91, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T00:30:46,089 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [cb28580903fb,33675,-1] 2024-11-13T00:30:46,089 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-13T00:30:46,089 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T00:30:46,090 INFO [HMaster-EventLoopGroup-12-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35372, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-13T00:30:46,091 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1a7a85d7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T00:30:46,092 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-13T00:30:46,093 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=cb28580903fb,35993,1731457845270, seqNum=-1] 2024-11-13T00:30:46,093 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-13T00:30:46,094 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48410, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-13T00:30:46,095 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=cb28580903fb,33675,1731457845226 2024-11-13T00:30:46,095 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T00:30:46,098 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-13T00:30:46,098 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-13T00:30:46,099 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.AsyncConnectionImpl(321): The fetched master address is cb28580903fb,33675,1731457845226 2024-11-13T00:30:46,099 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@4805cd46 2024-11-13T00:30:46,099 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-13T00:30:46,100 INFO [HMaster-EventLoopGroup-12-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35386, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-13T00:30:46,101 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33675 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-13T00:30:46,101 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33675 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 2024-11-13T00:30:46,101 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33675 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testCompactionRecordDoesntBlockRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-13T00:30:46,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33675 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-13T00:30:46,103 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-11-13T00:30:46,103 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T00:30:46,104 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33675 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testCompactionRecordDoesntBlockRolling" procId is: 4 2024-11-13T00:30:46,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33675 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-13T00:30:46,105 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure 
table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-13T00:30:46,111 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38553 is added to blk_1073741835_1011 (size=405) 2024-11-13T00:30:46,111 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46085 is added to blk_1073741835_1011 (size=405) 2024-11-13T00:30:46,112 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 77c503dfbc138df345484c11abcb0eb4, NAME => 'TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731457846101.77c503dfbc138df345484c11abcb0eb4.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testCompactionRecordDoesntBlockRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:39533/user/jenkins/test-data/f0421069-58d7-d4ce-1e56-b679fee0e922 2024-11-13T00:30:46,118 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38553 is added to blk_1073741836_1012 (size=88) 2024-11-13T00:30:46,118 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46085 is added to blk_1073741836_1012 (size=88) 2024-11-13T00:30:46,118 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731457846101.77c503dfbc138df345484c11abcb0eb4.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T00:30:46,119 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1722): Closing 77c503dfbc138df345484c11abcb0eb4, disabling compactions & flushes 2024-11-13T00:30:46,119 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731457846101.77c503dfbc138df345484c11abcb0eb4. 2024-11-13T00:30:46,119 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731457846101.77c503dfbc138df345484c11abcb0eb4. 2024-11-13T00:30:46,119 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731457846101.77c503dfbc138df345484c11abcb0eb4. after waiting 0 ms 2024-11-13T00:30:46,119 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731457846101.77c503dfbc138df345484c11abcb0eb4. 
2024-11-13T00:30:46,119 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731457846101.77c503dfbc138df345484c11abcb0eb4. 2024-11-13T00:30:46,119 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for 77c503dfbc138df345484c11abcb0eb4: Waiting for close lock at 1731457846119Disabling compacts and flushes for region at 1731457846119Disabling writes for close at 1731457846119Writing region close event to WAL at 1731457846119Closed at 1731457846119 2024-11-13T00:30:46,120 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_ADD_TO_META 2024-11-13T00:30:46,120 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731457846101.77c503dfbc138df345484c11abcb0eb4.","families":{"info":[{"qualifier":"regioninfo","vlen":87,"tag":[],"timestamp":"1731457846120"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731457846120"}]},"ts":"1731457846120"} 2024-11-13T00:30:46,123 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 2024-11-13T00:30:46,124 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-13T00:30:46,124 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731457846124"}]},"ts":"1731457846124"} 2024-11-13T00:30:46,126 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testCompactionRecordDoesntBlockRolling, state=ENABLING in hbase:meta 2024-11-13T00:30:46,126 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=77c503dfbc138df345484c11abcb0eb4, ASSIGN}] 2024-11-13T00:30:46,128 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=77c503dfbc138df345484c11abcb0eb4, ASSIGN 2024-11-13T00:30:46,129 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=77c503dfbc138df345484c11abcb0eb4, ASSIGN; state=OFFLINE, location=cb28580903fb,35993,1731457845270; forceNewPlan=false, retain=false 2024-11-13T00:30:46,280 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=77c503dfbc138df345484c11abcb0eb4, regionState=OPENING, regionLocation=cb28580903fb,35993,1731457845270 2024-11-13T00:30:46,282 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-13-2 {}] 
procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=77c503dfbc138df345484c11abcb0eb4, ASSIGN because future has completed 2024-11-13T00:30:46,283 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 77c503dfbc138df345484c11abcb0eb4, server=cb28580903fb,35993,1731457845270}] 2024-11-13T00:30:46,439 INFO [RS_OPEN_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731457846101.77c503dfbc138df345484c11abcb0eb4. 2024-11-13T00:30:46,439 DEBUG [RS_OPEN_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 77c503dfbc138df345484c11abcb0eb4, NAME => 'TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731457846101.77c503dfbc138df345484c11abcb0eb4.', STARTKEY => '', ENDKEY => ''} 2024-11-13T00:30:46,440 DEBUG [RS_OPEN_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testCompactionRecordDoesntBlockRolling 77c503dfbc138df345484c11abcb0eb4 2024-11-13T00:30:46,440 DEBUG [RS_OPEN_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731457846101.77c503dfbc138df345484c11abcb0eb4.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T00:30:46,440 DEBUG [RS_OPEN_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 77c503dfbc138df345484c11abcb0eb4 2024-11-13T00:30:46,440 DEBUG [RS_OPEN_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 77c503dfbc138df345484c11abcb0eb4 2024-11-13T00:30:46,441 INFO [StoreOpener-77c503dfbc138df345484c11abcb0eb4-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 77c503dfbc138df345484c11abcb0eb4 2024-11-13T00:30:46,442 INFO [StoreOpener-77c503dfbc138df345484c11abcb0eb4-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 77c503dfbc138df345484c11abcb0eb4 columnFamilyName info 2024-11-13T00:30:46,442 DEBUG [StoreOpener-77c503dfbc138df345484c11abcb0eb4-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker 
impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T00:30:46,443 INFO [StoreOpener-77c503dfbc138df345484c11abcb0eb4-1 {}] regionserver.HStore(327): Store=77c503dfbc138df345484c11abcb0eb4/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T00:30:46,443 DEBUG [RS_OPEN_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 77c503dfbc138df345484c11abcb0eb4 2024-11-13T00:30:46,443 DEBUG [RS_OPEN_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39533/user/jenkins/test-data/f0421069-58d7-d4ce-1e56-b679fee0e922/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/77c503dfbc138df345484c11abcb0eb4 2024-11-13T00:30:46,443 DEBUG [RS_OPEN_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39533/user/jenkins/test-data/f0421069-58d7-d4ce-1e56-b679fee0e922/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/77c503dfbc138df345484c11abcb0eb4 2024-11-13T00:30:46,444 DEBUG [RS_OPEN_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 77c503dfbc138df345484c11abcb0eb4 2024-11-13T00:30:46,444 DEBUG [RS_OPEN_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 77c503dfbc138df345484c11abcb0eb4 2024-11-13T00:30:46,445 DEBUG [RS_OPEN_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 77c503dfbc138df345484c11abcb0eb4 2024-11-13T00:30:46,447 DEBUG [RS_OPEN_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39533/user/jenkins/test-data/f0421069-58d7-d4ce-1e56-b679fee0e922/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/77c503dfbc138df345484c11abcb0eb4/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-13T00:30:46,448 INFO [RS_OPEN_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 77c503dfbc138df345484c11abcb0eb4; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=876199, jitterRate=0.11414554715156555}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-13T00:30:46,448 DEBUG [RS_OPEN_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 77c503dfbc138df345484c11abcb0eb4 2024-11-13T00:30:46,448 DEBUG [RS_OPEN_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 77c503dfbc138df345484c11abcb0eb4: Running coprocessor pre-open hook at 1731457846440Writing region info on filesystem at 1731457846440Initializing all the Stores at 1731457846441 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION 
=> 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731457846441Cleaning up temporary data from old regions at 1731457846444 (+3 ms)Running coprocessor post-open hooks at 1731457846448 (+4 ms)Region opened successfully at 1731457846448 2024-11-13T00:30:46,449 INFO [RS_OPEN_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731457846101.77c503dfbc138df345484c11abcb0eb4., pid=6, masterSystemTime=1731457846435 2024-11-13T00:30:46,452 DEBUG [RS_OPEN_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731457846101.77c503dfbc138df345484c11abcb0eb4. 2024-11-13T00:30:46,452 INFO [RS_OPEN_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731457846101.77c503dfbc138df345484c11abcb0eb4. 2024-11-13T00:30:46,453 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=77c503dfbc138df345484c11abcb0eb4, regionState=OPEN, openSeqNum=2, regionLocation=cb28580903fb,35993,1731457845270 2024-11-13T00:30:46,455 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-13-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 77c503dfbc138df345484c11abcb0eb4, server=cb28580903fb,35993,1731457845270 because future has completed 2024-11-13T00:30:46,458 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-13T00:30:46,458 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 77c503dfbc138df345484c11abcb0eb4, server=cb28580903fb,35993,1731457845270 in 173 msec 2024-11-13T00:30:46,460 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-13T00:30:46,460 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=77c503dfbc138df345484c11abcb0eb4, ASSIGN in 332 msec 2024-11-13T00:30:46,461 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-13T00:30:46,461 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731457846461"}]},"ts":"1731457846461"} 2024-11-13T00:30:46,463 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testCompactionRecordDoesntBlockRolling, state=ENABLED in hbase:meta 2024-11-13T00:30:46,464 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_POST_OPERATION 2024-11-13T00:30:46,466 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, 
hasLock=false; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 363 msec 2024-11-13T00:30:46,690 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/MasterData/WALs/cb28580903fb,32917,1731457759225/cb28580903fb%2C32917%2C1731457759225.1731457759520 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:30:46,954 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,35015,1731457760593/cb28580903fb%2C35015%2C1731457760593.1731457760827 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:30:46,955 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,41233,1731457759323/cb28580903fb%2C41233%2C1731457759323.meta.1731457760430.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more
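The repeated "Failed invocation ... Filesystem closed" WARNs above (and again below) come from the WAL close path: RecoverLeaseFSUtils keeps asking the NameNode whether an old WAL file is closed yet, calling DistributedFileSystem.isFileClosed through reflection (which is why each trace starts in GeneratedMethodAccessor), but the DFSClient behind that filesystem has already been shut down, so every probe fails with "Filesystem closed" and is retried roughly once per second. Below is a minimal sketch of that recover-and-poll pattern; it is not the HBase implementation, and everything except the HDFS calls recoverLease() and isFileClosed() (class name, retry count, pause) is illustrative only.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class WalLeaseRecoverySketch {
  // Ask the NameNode to recover the lease on an old WAL, then poll
  // isFileClosed() until it reports the file closed or we give up.
  static boolean recoverLeaseWithRetries(DistributedFileSystem dfs, Path wal,
                                         int maxAttempts, long pauseMs) throws InterruptedException {
    for (int attempt = 1; attempt <= maxAttempts; attempt++) {
      try {
        if (dfs.recoverLease(wal) || dfs.isFileClosed(wal)) {
          return true; // lease released, file is closed
        }
      } catch (IOException e) {
        // A DFSClient that was already closed fails every probe with
        // "Filesystem closed", which is what the WARNs above are reporting.
        System.err.println("Failed invocation for " + wal + " (attempt " + attempt + "): " + e.getMessage());
      }
      Thread.sleep(pauseMs); // the log shows roughly one attempt per second
    }
    return false;
  }

  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    Path wal = new Path(args[0]); // e.g. an hdfs://.../WALs/... path like the ones above
    DistributedFileSystem dfs = (DistributedFileSystem) wal.getFileSystem(conf);
    recoverLeaseWithRetries(dfs, wal, 30, 1000L);
  }
}

In this run the loop can never succeed because the shared client is already closed, which is why the same trace recurs with only the timestamp changing.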
2024-11-13T00:30:49,509 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta
2024-11-13T00:30:49,509 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer
2024-11-13T00:30:49,510 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint
2024-11-13T00:30:49,510 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers
2024-11-13T00:30:49,510 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-13T00:30:49,510 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling Metrics about Tables on a single HBase RegionServer
2024-11-13T00:30:51,537 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties
2024-11-13T00:30:51,538 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-13T00:30:51,564 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta'
2024-11-13T00:30:51,564 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testCompactionRecordDoesntBlockRolling'
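The HBase-Metrics2 sampler warnings above are a separate, teardown-related symptom: the poller cannot find a hadoop-metrics2 properties file on the classpath (it tried hadoop-metrics2-datanode.properties and hadoop-metrics2.properties), and it appears to be collecting from a DataNode dataset whose executor map has already been cleared, hence the null "this.executors". If metrics output were actually wanted in a run like this, a minimal hadoop-metrics2.properties such as the sketch below would satisfy the first lookup; the period, sink class, and file name are illustrative defaults, not values taken from this run.

# hadoop-metrics2.properties (illustrative sketch; place it on the DataNode classpath)
# sample all metric sources every 10 seconds
*.period=10
# write DataNode metrics to a local file sink
*.sink.file.class=org.apache.hadoop.metrics2.sink.FileSink
datanode.sink.file.filename=datanode-metrics.out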
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:30:53,958 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,41233,1731457759323/cb28580903fb%2C41233%2C1731457759323.meta.1731457760430.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:30:53,958 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,35015,1731457760593/cb28580903fb%2C35015%2C1731457760593.1731457760827 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:30:54,695 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/MasterData/WALs/cb28580903fb,32917,1731457759225/cb28580903fb%2C32917%2C1731457759225.1731457759520 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:30:54,959 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,41233,1731457759323/cb28580903fb%2C41233%2C1731457759323.meta.1731457760430.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:30:54,959 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,35015,1731457760593/cb28580903fb%2C35015%2C1731457760593.1731457760827 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:30:55,695 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/MasterData/WALs/cb28580903fb,32917,1731457759225/cb28580903fb%2C32917%2C1731457759225.1731457759520 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:30:55,959 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,35015,1731457760593/cb28580903fb%2C35015%2C1731457760593.1731457760827 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:30:55,959 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,41233,1731457759323/cb28580903fb%2C41233%2C1731457759323.meta.1731457760430.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-11-13T00:30:56,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33675 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4
2024-11-13T00:30:56,116 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed
2024-11-13T00:30:56,116 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testCompactionRecordDoesntBlockRolling,, stopping at row=TestLogRolling-testCompactionRecordDoesntBlockRolling ,, for max=2147483647 with caching=100
2024-11-13T00:30:56,119 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-13T00:30:56,119 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731457846101.77c503dfbc138df345484c11abcb0eb4.
2024-11-13T00:30:56,122 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testCompactionRecordDoesntBlockRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731457846101.77c503dfbc138df345484c11abcb0eb4., hostname=cb28580903fb,35993,1731457845270, seqNum=2]
2024-11-13T00:30:56,129 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33675 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-13T00:30:56,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33675 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-13T00:30:56,136 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE
2024-11-13T00:30:56,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33675 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7
2024-11-13T00:30:56,137 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-11-13T00:30:56,139 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-11-13T00:30:56,300 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35993 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=8
2024-11-13T00:30:56,301 DEBUG [RS_FLUSH_OPERATIONS-regionserver/cb28580903fb:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731457846101.77c503dfbc138df345484c11abcb0eb4.
2024-11-13T00:30:56,301 INFO [RS_FLUSH_OPERATIONS-regionserver/cb28580903fb:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2902): Flushing 77c503dfbc138df345484c11abcb0eb4 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB
2024-11-13T00:30:56,317 DEBUG [RS_FLUSH_OPERATIONS-regionserver/cb28580903fb:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39533/user/jenkins/test-data/f0421069-58d7-d4ce-1e56-b679fee0e922/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/77c503dfbc138df345484c11abcb0eb4/.tmp/info/c7f04cb3b9a24e7e9d04f5b8d214fe8f is 1080, key is row0001/info:/1731457856123/Put/seqid=0
2024-11-13T00:30:56,322 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46085 is added to blk_1073741837_1013 (size=6033)
2024-11-13T00:30:56,323 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38553 is added to blk_1073741837_1013 (size=6033)
2024-11-13T00:30:56,323 INFO [RS_FLUSH_OPERATIONS-regionserver/cb28580903fb:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:39533/user/jenkins/test-data/f0421069-58d7-d4ce-1e56-b679fee0e922/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/77c503dfbc138df345484c11abcb0eb4/.tmp/info/c7f04cb3b9a24e7e9d04f5b8d214fe8f
2024-11-13T00:30:56,330 DEBUG [RS_FLUSH_OPERATIONS-regionserver/cb28580903fb:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39533/user/jenkins/test-data/f0421069-58d7-d4ce-1e56-b679fee0e922/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/77c503dfbc138df345484c11abcb0eb4/.tmp/info/c7f04cb3b9a24e7e9d04f5b8d214fe8f as hdfs://localhost:39533/user/jenkins/test-data/f0421069-58d7-d4ce-1e56-b679fee0e922/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/77c503dfbc138df345484c11abcb0eb4/info/c7f04cb3b9a24e7e9d04f5b8d214fe8f
2024-11-13T00:30:56,335 INFO [RS_FLUSH_OPERATIONS-regionserver/cb28580903fb:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39533/user/jenkins/test-data/f0421069-58d7-d4ce-1e56-b679fee0e922/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/77c503dfbc138df345484c11abcb0eb4/info/c7f04cb3b9a24e7e9d04f5b8d214fe8f, entries=1, sequenceid=5, filesize=5.9 K
2024-11-13T00:30:56,337 INFO [RS_FLUSH_OPERATIONS-regionserver/cb28580903fb:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 77c503dfbc138df345484c11abcb0eb4 in 35ms, sequenceid=5, compaction requested=false
2024-11-13T00:30:56,337 DEBUG [RS_FLUSH_OPERATIONS-regionserver/cb28580903fb:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2603): Flush status journal for 77c503dfbc138df345484c11abcb0eb4:
2024-11-13T00:30:56,337 DEBUG [RS_FLUSH_OPERATIONS-regionserver/cb28580903fb:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731457846101.77c503dfbc138df345484c11abcb0eb4.
2024-11-13T00:30:56,339 DEBUG [RS_FLUSH_OPERATIONS-regionserver/cb28580903fb:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=8
2024-11-13T00:30:56,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33675 {}] master.HMaster(4169): Remote procedure done, pid=8
2024-11-13T00:30:56,345 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7
2024-11-13T00:30:56,345 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 203 msec
2024-11-13T00:30:56,347 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 215 msec
2024-11-13T00:30:56,696 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/MasterData/WALs/cb28580903fb,32917,1731457759225/cb28580903fb%2C32917%2C1731457759225.1731457759520
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	...
11 more 2024-11-13T00:30:56,960 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,41233,1731457759323/cb28580903fb%2C41233%2C1731457759323.meta.1731457760430.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:30:56,960 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,35015,1731457760593/cb28580903fb%2C35015%2C1731457760593.1731457760827 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:30:57,696 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/MasterData/WALs/cb28580903fb,32917,1731457759225/cb28580903fb%2C32917%2C1731457759225.1731457759520 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more
2024-11-13T00:30:57,697 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=2 on file=hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/MasterData/WALs/cb28580903fb,32917,1731457759225/cb28580903fb%2C32917%2C1731457759225.1731457759520 after 68043ms
java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at jdk.internal.reflect.GeneratedMethodAccessor203.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-13T00:30:57,961 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,35015,1731457760593/cb28580903fb%2C35015%2C1731457760593.1731457760827
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:30:57,961 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,41233,1731457759323/cb28580903fb%2C41233%2C1731457759323.meta.1731457760430.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-13T00:30:58,697 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/MasterData/WALs/cb28580903fb,32917,1731457759225/cb28580903fb%2C32917%2C1731457759225.1731457759520 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:30:58,961 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,41233,1731457759323/cb28580903fb%2C41233%2C1731457759323.meta.1731457760430.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:30:58,961 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,35015,1731457760593/cb28580903fb%2C35015%2C1731457760593.1731457760827 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-13T00:30:59,698 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/MasterData/WALs/cb28580903fb,32917,1731457759225/cb28580903fb%2C32917%2C1731457759225.1731457759520 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:30:59,962 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,41233,1731457759323/cb28580903fb%2C41233%2C1731457759323.meta.1731457760430.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:30:59,962 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,35015,1731457760593/cb28580903fb%2C35015%2C1731457760593.1731457760827 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-13T00:31:00,698 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/MasterData/WALs/cb28580903fb,32917,1731457759225/cb28580903fb%2C32917%2C1731457759225.1731457759520 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:31:00,962 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,35015,1731457760593/cb28580903fb%2C35015%2C1731457760593.1731457760827 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:31:00,962 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,41233,1731457759323/cb28580903fb%2C41233%2C1731457759323.meta.1731457760430.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-13T00:31:01,699 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/MasterData/WALs/cb28580903fb,32917,1731457759225/cb28580903fb%2C32917%2C1731457759225.1731457759520 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:31:01,963 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,35015,1731457760593/cb28580903fb%2C35015%2C1731457760593.1731457760827 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:31:01,963 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,41233,1731457759323/cb28580903fb%2C41233%2C1731457759323.meta.1731457760430.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-13T00:31:02,699 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/MasterData/WALs/cb28580903fb,32917,1731457759225/cb28580903fb%2C32917%2C1731457759225.1731457759520 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:31:02,963 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,41233,1731457759323/cb28580903fb%2C41233%2C1731457759323.meta.1731457760430.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:31:02,963 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,35015,1731457760593/cb28580903fb%2C35015%2C1731457760593.1731457760827 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-13T00:31:03,700 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/MasterData/WALs/cb28580903fb,32917,1731457759225/cb28580903fb%2C32917%2C1731457759225.1731457759520 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:31:03,964 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,41233,1731457759323/cb28580903fb%2C41233%2C1731457759323.meta.1731457760430.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:31:03,964 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,35015,1731457760593/cb28580903fb%2C35015%2C1731457760593.1731457760827 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-13T00:31:04,700 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/MasterData/WALs/cb28580903fb,32917,1731457759225/cb28580903fb%2C32917%2C1731457759225.1731457759520 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:31:04,964 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,41233,1731457759323/cb28580903fb%2C41233%2C1731457759323.meta.1731457760430.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:31:04,964 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,35015,1731457760593/cb28580903fb%2C35015%2C1731457760593.1731457760827 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-13T00:31:05,701 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/MasterData/WALs/cb28580903fb,32917,1731457759225/cb28580903fb%2C32917%2C1731457759225.1731457759520 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:31:05,965 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,35015,1731457760593/cb28580903fb%2C35015%2C1731457760593.1731457760827 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:31:05,965 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,41233,1731457759323/cb28580903fb%2C41233%2C1731457759323.meta.1731457760430.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-13T00:31:05,965 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=2 on file=hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,41233,1731457759323/cb28580903fb%2C41233%2C1731457759323.meta.1731457760430.meta after 68035ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor203.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T00:31:05,965 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=2 on file=hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,35015,1731457760593/cb28580903fb%2C35015%2C1731457760593.1731457760827 after 68046ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor203.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-13T00:31:06,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33675 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-13T00:31:06,175 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-11-13T00:31:06,178 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33675 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-13T00:31:06,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33675 {}] procedure2.ProcedureExecutor(1139): Stored pid=9, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-13T00:31:06,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33675 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=9 2024-11-13T00:31:06,180 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=9, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE 2024-11-13T00:31:06,181 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=9, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-13T00:31:06,182 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-13T00:31:06,335 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35993 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=10 2024-11-13T00:31:06,335 DEBUG [RS_FLUSH_OPERATIONS-regionserver/cb28580903fb:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731457846101.77c503dfbc138df345484c11abcb0eb4. 
2024-11-13T00:31:06,335 INFO [RS_FLUSH_OPERATIONS-regionserver/cb28580903fb:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(2902): Flushing 77c503dfbc138df345484c11abcb0eb4 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-13T00:31:06,340 DEBUG [RS_FLUSH_OPERATIONS-regionserver/cb28580903fb:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39533/user/jenkins/test-data/f0421069-58d7-d4ce-1e56-b679fee0e922/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/77c503dfbc138df345484c11abcb0eb4/.tmp/info/212451240d734a708e6e972cf281aae2 is 1080, key is row0002/info:/1731457866176/Put/seqid=0 2024-11-13T00:31:06,345 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38553 is added to blk_1073741838_1014 (size=6033) 2024-11-13T00:31:06,345 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46085 is added to blk_1073741838_1014 (size=6033) 2024-11-13T00:31:06,346 INFO [RS_FLUSH_OPERATIONS-regionserver/cb28580903fb:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=9 (bloomFilter=true), to=hdfs://localhost:39533/user/jenkins/test-data/f0421069-58d7-d4ce-1e56-b679fee0e922/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/77c503dfbc138df345484c11abcb0eb4/.tmp/info/212451240d734a708e6e972cf281aae2 2024-11-13T00:31:06,352 DEBUG [RS_FLUSH_OPERATIONS-regionserver/cb28580903fb:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39533/user/jenkins/test-data/f0421069-58d7-d4ce-1e56-b679fee0e922/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/77c503dfbc138df345484c11abcb0eb4/.tmp/info/212451240d734a708e6e972cf281aae2 as hdfs://localhost:39533/user/jenkins/test-data/f0421069-58d7-d4ce-1e56-b679fee0e922/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/77c503dfbc138df345484c11abcb0eb4/info/212451240d734a708e6e972cf281aae2 2024-11-13T00:31:06,356 INFO [RS_FLUSH_OPERATIONS-regionserver/cb28580903fb:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39533/user/jenkins/test-data/f0421069-58d7-d4ce-1e56-b679fee0e922/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/77c503dfbc138df345484c11abcb0eb4/info/212451240d734a708e6e972cf281aae2, entries=1, sequenceid=9, filesize=5.9 K 2024-11-13T00:31:06,357 INFO [RS_FLUSH_OPERATIONS-regionserver/cb28580903fb:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 77c503dfbc138df345484c11abcb0eb4 in 22ms, sequenceid=9, compaction requested=false 2024-11-13T00:31:06,357 DEBUG [RS_FLUSH_OPERATIONS-regionserver/cb28580903fb:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(2603): Flush status journal for 77c503dfbc138df345484c11abcb0eb4: 2024-11-13T00:31:06,358 DEBUG [RS_FLUSH_OPERATIONS-regionserver/cb28580903fb:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731457846101.77c503dfbc138df345484c11abcb0eb4. 
2024-11-13T00:31:06,358 DEBUG [RS_FLUSH_OPERATIONS-regionserver/cb28580903fb:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=10 2024-11-13T00:31:06,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33675 {}] master.HMaster(4169): Remote procedure done, pid=10 2024-11-13T00:31:06,361 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=10, resume processing ppid=9 2024-11-13T00:31:06,361 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, ppid=9, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 177 msec 2024-11-13T00:31:06,364 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 184 msec 2024-11-13T00:31:06,701 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/MasterData/WALs/cb28580903fb,32917,1731457759225/cb28580903fb%2C32917%2C1731457759225.1731457759520 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
2024-11-13T00:31:06,966 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,35015,1731457760593/cb28580903fb%2C35015%2C1731457760593.1731457760827 java.lang.reflect.InvocationTargetException: null
2024-11-13T00:31:06,966 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,41233,1731457759323/cb28580903fb%2C41233%2C1731457759323.meta.1731457760430.meta java.lang.reflect.InvocationTargetException: null
2024-11-13T00:31:07,702 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/MasterData/WALs/cb28580903fb,32917,1731457759225/cb28580903fb%2C32917%2C1731457759225.1731457759520 java.lang.reflect.InvocationTargetException: null
2024-11-13T00:31:07,966 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,41233,1731457759323/cb28580903fb%2C41233%2C1731457759323.meta.1731457760430.meta java.lang.reflect.InvocationTargetException: null
2024-11-13T00:31:07,966 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,35015,1731457760593/cb28580903fb%2C35015%2C1731457760593.1731457760827 java.lang.reflect.InvocationTargetException: null
2024-11-13T00:31:08,702 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/MasterData/WALs/cb28580903fb,32917,1731457759225/cb28580903fb%2C32917%2C1731457759225.1731457759520 java.lang.reflect.InvocationTargetException: null
2024-11-13T00:31:08,967 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,41233,1731457759323/cb28580903fb%2C41233%2C1731457759323.meta.1731457760430.meta java.lang.reflect.InvocationTargetException: null
2024-11-13T00:31:08,967 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,35015,1731457760593/cb28580903fb%2C35015%2C1731457760593.1731457760827 java.lang.reflect.InvocationTargetException: null
2024-11-13T00:31:09,703 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/MasterData/WALs/cb28580903fb,32917,1731457759225/cb28580903fb%2C32917%2C1731457759225.1731457759520 java.lang.reflect.InvocationTargetException: null
2024-11-13T00:31:09,968 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,41233,1731457759323/cb28580903fb%2C41233%2C1731457759323.meta.1731457760430.meta java.lang.reflect.InvocationTargetException: null
2024-11-13T00:31:09,968 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,35015,1731457760593/cb28580903fb%2C35015%2C1731457760593.1731457760827 java.lang.reflect.InvocationTargetException: null
2024-11-13T00:31:10,703 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/MasterData/WALs/cb28580903fb,32917,1731457759225/cb28580903fb%2C32917%2C1731457759225.1731457759520 java.lang.reflect.InvocationTargetException: null
2024-11-13T00:31:10,968 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,41233,1731457759323/cb28580903fb%2C41233%2C1731457759323.meta.1731457760430.meta java.lang.reflect.InvocationTargetException: null
2024-11-13T00:31:10,968 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,35015,1731457760593/cb28580903fb%2C35015%2C1731457760593.1731457760827 java.lang.reflect.InvocationTargetException: null
2024-11-13T00:31:11,704 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/MasterData/WALs/cb28580903fb,32917,1731457759225/cb28580903fb%2C32917%2C1731457759225.1731457759520 java.lang.reflect.InvocationTargetException: null
2024-11-13T00:31:11,969 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,35015,1731457760593/cb28580903fb%2C35015%2C1731457760593.1731457760827 java.lang.reflect.InvocationTargetException: null
2024-11-13T00:31:11,969 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,41233,1731457759323/cb28580903fb%2C41233%2C1731457759323.meta.1731457760430.meta java.lang.reflect.InvocationTargetException: null
2024-11-13T00:31:12,704 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/MasterData/WALs/cb28580903fb,32917,1731457759225/cb28580903fb%2C32917%2C1731457759225.1731457759520 java.lang.reflect.InvocationTargetException: null
2024-11-13T00:31:12,969 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,35015,1731457760593/cb28580903fb%2C35015%2C1731457760593.1731457760827 java.lang.reflect.InvocationTargetException: null
2024-11-13T00:31:12,969 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,41233,1731457759323/cb28580903fb%2C41233%2C1731457759323.meta.1731457760430.meta java.lang.reflect.InvocationTargetException: null
2024-11-13T00:31:13,705 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/MasterData/WALs/cb28580903fb,32917,1731457759225/cb28580903fb%2C32917%2C1731457759225.1731457759520 java.lang.reflect.InvocationTargetException: null
2024-11-13T00:31:13,970 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,41233,1731457759323/cb28580903fb%2C41233%2C1731457759323.meta.1731457760430.meta java.lang.reflect.InvocationTargetException: null
2024-11-13T00:31:13,970 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,35015,1731457760593/cb28580903fb%2C35015%2C1731457760593.1731457760827 java.lang.reflect.InvocationTargetException: null
2024-11-13T00:31:14,706 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/MasterData/WALs/cb28580903fb,32917,1731457759225/cb28580903fb%2C32917%2C1731457759225.1731457759520 java.lang.reflect.InvocationTargetException: null
2024-11-13T00:31:14,970 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,35015,1731457760593/cb28580903fb%2C35015%2C1731457760593.1731457760827 java.lang.reflect.InvocationTargetException: null
2024-11-13T00:31:14,970 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,41233,1731457759323/cb28580903fb%2C41233%2C1731457759323.meta.1731457760430.meta java.lang.reflect.InvocationTargetException: null
2024-11-13T00:31:15,211 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details.
2024-11-13T00:31:15,706 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/MasterData/WALs/cb28580903fb,32917,1731457759225/cb28580903fb%2C32917%2C1731457759225.1731457759520 java.lang.reflect.InvocationTargetException: null
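The FsDatasetAsyncDiskServiceFixer DEBUG entry just above records a reflective lookup of a private threadGroup field that no longer exists on Hadoop releases newer than 3.2.3/3.3.4 (see HBASE-27595); the fixer logs the NoSuchFieldException at DEBUG and carries on. A minimal, hypothetical sketch of that defensive reflective-field pattern (the class and helper names below are illustrative, not the HBaseTestingUtil implementation):

// Hypothetical sketch of a defensive reflective field lookup of the kind the
// FsDatasetAsyncDiskServiceFixer DEBUG entry above describes; names are illustrative.
import java.lang.reflect.Field;

final class ReflectiveFieldProbe {
  /** Returns the named private field's value, or null if the field is missing or unreadable. */
  static Object readPrivateField(Object target, String fieldName) {
    try {
      Field f = target.getClass().getDeclaredField(fieldName);
      f.setAccessible(true);
      return f.get(target);
    } catch (NoSuchFieldException e) {
      // On newer Hadoop releases the internals changed, so the field may be gone;
      // log and continue rather than failing the test (mirrors the DEBUG message above).
      System.err.println("NoSuchFieldException: " + fieldName + "; see HBASE-27595 for details.");
      return null;
    } catch (IllegalAccessException e) {
      return null;
    }
  }
}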
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:31:15,971 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,35015,1731457760593/cb28580903fb%2C35015%2C1731457760593.1731457760827 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:31:15,971 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,41233,1731457759323/cb28580903fb%2C41233%2C1731457759323.meta.1731457760430.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-11-13T00:31:16,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33675 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=9
2024-11-13T00:31:16,235 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed
2024-11-13T00:31:16,238 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor cb28580903fb%2C35993%2C1731457845270.1731457876238
2024-11-13T00:31:16,244 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-13T00:31:16,244 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-13T00:31:16,244 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-13T00:31:16,244 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-13T00:31:16,244 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-13T00:31:16,244 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/f0421069-58d7-d4ce-1e56-b679fee0e922/WALs/cb28580903fb,35993,1731457845270/cb28580903fb%2C35993%2C1731457845270.1731457845651 with entries=8, filesize=5.41 KB; new WAL /user/jenkins/test-data/f0421069-58d7-d4ce-1e56-b679fee0e922/WALs/cb28580903fb,35993,1731457845270/cb28580903fb%2C35993%2C1731457845270.1731457876238
2024-11-13T00:31:16,246 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46085 is added to blk_1073741833_1009 (size=5546)
2024-11-13T00:31:16,246 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38553 is added to blk_1073741833_1009 (size=5546)
2024-11-13T00:31:16,249 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41921:41921),(127.0.0.1/127.0.0.1:41235:41235)]
2024-11-13T00:31:16,250 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33675 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-13T00:31:16,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33675 {}] procedure2.ProcedureExecutor(1139): Stored pid=11, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-13T00:31:16,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33675 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=11
2024-11-13T00:31:16,252 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=11, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE
2024-11-13T00:31:16,253 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=11, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-11-13T00:31:16,253 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=12, ppid=11, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-11-13T00:31:16,406 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35993 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=12
2024-11-13T00:31:16,406 DEBUG [RS_FLUSH_OPERATIONS-regionserver/cb28580903fb:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731457846101.77c503dfbc138df345484c11abcb0eb4.
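The FLUSH operation recorded above (HMaster$22 accepting a flush request, then FlushTableProcedure pid=11 fanning out FlushRegionProcedure pid=12) is the server-side handling of a client flush call. As a rough illustration only, a client could trigger the same sequence through the public Admin API; the configuration source and class name below are assumptions, with the table name taken from this log rather than from the actual test code.

// Illustrative sketch (not the test's own code): triggering a table flush via the
// HBase Admin API. Connection settings are assumed to come from the classpath
// configuration; the table name is the one appearing in the log above.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class TriggerFlush {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    TableName table = TableName.valueOf("TestLogRolling-testCompactionRecordDoesntBlockRolling");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Admin.flush submits a flush-table request to the master, which runs a
      // FlushTableProcedure and one FlushRegionProcedure per region, matching
      // the pid=11 / pid=12 entries in this log.
      admin.flush(table);
    }
  }
}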
2024-11-13T00:31:16,406 INFO [RS_FLUSH_OPERATIONS-regionserver/cb28580903fb:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(2902): Flushing 77c503dfbc138df345484c11abcb0eb4 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB
2024-11-13T00:31:16,411 DEBUG [RS_FLUSH_OPERATIONS-regionserver/cb28580903fb:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39533/user/jenkins/test-data/f0421069-58d7-d4ce-1e56-b679fee0e922/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/77c503dfbc138df345484c11abcb0eb4/.tmp/info/95d3e46535da4779bb7c413bed086e01 is 1080, key is row0003/info:/1731457876236/Put/seqid=0
2024-11-13T00:31:16,415 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38553 is added to blk_1073741840_1016 (size=6033)
2024-11-13T00:31:16,415 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46085 is added to blk_1073741840_1016 (size=6033)
2024-11-13T00:31:16,416 INFO [RS_FLUSH_OPERATIONS-regionserver/cb28580903fb:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:39533/user/jenkins/test-data/f0421069-58d7-d4ce-1e56-b679fee0e922/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/77c503dfbc138df345484c11abcb0eb4/.tmp/info/95d3e46535da4779bb7c413bed086e01
2024-11-13T00:31:16,421 DEBUG [RS_FLUSH_OPERATIONS-regionserver/cb28580903fb:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39533/user/jenkins/test-data/f0421069-58d7-d4ce-1e56-b679fee0e922/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/77c503dfbc138df345484c11abcb0eb4/.tmp/info/95d3e46535da4779bb7c413bed086e01 as hdfs://localhost:39533/user/jenkins/test-data/f0421069-58d7-d4ce-1e56-b679fee0e922/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/77c503dfbc138df345484c11abcb0eb4/info/95d3e46535da4779bb7c413bed086e01
2024-11-13T00:31:16,426 INFO [RS_FLUSH_OPERATIONS-regionserver/cb28580903fb:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39533/user/jenkins/test-data/f0421069-58d7-d4ce-1e56-b679fee0e922/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/77c503dfbc138df345484c11abcb0eb4/info/95d3e46535da4779bb7c413bed086e01, entries=1, sequenceid=13, filesize=5.9 K
2024-11-13T00:31:16,427 INFO [RS_FLUSH_OPERATIONS-regionserver/cb28580903fb:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 77c503dfbc138df345484c11abcb0eb4 in 21ms, sequenceid=13, compaction requested=true
2024-11-13T00:31:16,427 DEBUG [RS_FLUSH_OPERATIONS-regionserver/cb28580903fb:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(2603): Flush status journal for 77c503dfbc138df345484c11abcb0eb4:
2024-11-13T00:31:16,428 DEBUG [RS_FLUSH_OPERATIONS-regionserver/cb28580903fb:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731457846101.77c503dfbc138df345484c11abcb0eb4.
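The recurring "Failed invocation ... java.lang.reflect.InvocationTargetException: null ... Caused by: java.io.IOException: Filesystem closed" warnings from Close-WAL-Writer-0 throughout this log have a simple shape: RecoverLeaseFSUtils reaches isFileClosed reflectively, and Method.invoke wraps whatever the target throws in an InvocationTargetException whose own message is null, which Log4j renders as ": null". A minimal, self-contained sketch of that wrapping, using a hypothetical stand-in class rather than real HDFS code:

// Illustrative sketch only: shows how a reflective call wraps the target's
// IOException in InvocationTargetException, producing the layering seen above.
import java.io.IOException;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;

public class ReflectionWrapDemo {
  // Hypothetical stand-in that fails the same way a closed DFSClient does.
  public static class ClosedFs {
    public boolean isFileClosed(String path) throws IOException {
      throw new IOException("Filesystem closed");
    }
  }

  public static void main(String[] args) throws Exception {
    Method m = ClosedFs.class.getMethod("isFileClosed", String.class);
    try {
      m.invoke(new ClosedFs(), "/some/wal");
    } catch (InvocationTargetException e) {
      // Same layering as in the warnings above: the reflective wrapper on top,
      // the real IOException underneath as the cause.
      System.out.println(e);            // java.lang.reflect.InvocationTargetException
      System.out.println(e.getCause()); // java.io.IOException: Filesystem closed
    }
  }
}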
2024-11-13T00:31:16,428 DEBUG [RS_FLUSH_OPERATIONS-regionserver/cb28580903fb:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=12 2024-11-13T00:31:16,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33675 {}] master.HMaster(4169): Remote procedure done, pid=12 2024-11-13T00:31:16,432 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=12, resume processing ppid=11 2024-11-13T00:31:16,432 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=12, ppid=11, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 176 msec 2024-11-13T00:31:16,435 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 183 msec 2024-11-13T00:31:16,707 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/MasterData/WALs/cb28580903fb,32917,1731457759225/cb28580903fb%2C32917%2C1731457759225.1731457759520 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-13T00:31:16,971 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,41233,1731457759323/cb28580903fb%2C41233%2C1731457759323.meta.1731457760430.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:31:16,971 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,35015,1731457760593/cb28580903fb%2C35015%2C1731457760593.1731457760827 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:31:17,707 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/MasterData/WALs/cb28580903fb,32917,1731457759225/cb28580903fb%2C32917%2C1731457759225.1731457759520 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-13T00:31:17,972 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,41233,1731457759323/cb28580903fb%2C41233%2C1731457759323.meta.1731457760430.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:31:17,972 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,35015,1731457760593/cb28580903fb%2C35015%2C1731457760593.1731457760827 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:31:18,708 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/MasterData/WALs/cb28580903fb,32917,1731457759225/cb28580903fb%2C32917%2C1731457759225.1731457759520 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-13T00:31:18,973 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,41233,1731457759323/cb28580903fb%2C41233%2C1731457759323.meta.1731457760430.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:31:18,973 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,35015,1731457760593/cb28580903fb%2C35015%2C1731457760593.1731457760827 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:31:19,708 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/MasterData/WALs/cb28580903fb,32917,1731457759225/cb28580903fb%2C32917%2C1731457759225.1731457759520 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-13T00:31:19,973 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,41233,1731457759323/cb28580903fb%2C41233%2C1731457759323.meta.1731457760430.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:31:19,973 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,35015,1731457760593/cb28580903fb%2C35015%2C1731457760593.1731457760827 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:31:20,709 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/MasterData/WALs/cb28580903fb,32917,1731457759225/cb28580903fb%2C32917%2C1731457759225.1731457759520 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-13T00:31:20,974 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,41233,1731457759323/cb28580903fb%2C41233%2C1731457759323.meta.1731457760430.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:31:20,974 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,35015,1731457760593/cb28580903fb%2C35015%2C1731457760593.1731457760827 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:31:21,709 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/MasterData/WALs/cb28580903fb,32917,1731457759225/cb28580903fb%2C32917%2C1731457759225.1731457759520 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-13T00:31:21,974 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,35015,1731457760593/cb28580903fb%2C35015%2C1731457760593.1731457760827 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:31:21,974 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,41233,1731457759323/cb28580903fb%2C41233%2C1731457759323.meta.1731457760430.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:31:22,710 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/MasterData/WALs/cb28580903fb,32917,1731457759225/cb28580903fb%2C32917%2C1731457759225.1731457759520 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-13T00:31:22,975 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,35015,1731457760593/cb28580903fb%2C35015%2C1731457760593.1731457760827 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:31:22,975 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,41233,1731457759323/cb28580903fb%2C41233%2C1731457759323.meta.1731457760430.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:31:23,710 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/MasterData/WALs/cb28580903fb,32917,1731457759225/cb28580903fb%2C32917%2C1731457759225.1731457759520 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-13T00:31:23,975 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,35015,1731457760593/cb28580903fb%2C35015%2C1731457760593.1731457760827 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:31:23,975 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,41233,1731457759323/cb28580903fb%2C41233%2C1731457759323.meta.1731457760430.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:31:24,711 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/MasterData/WALs/cb28580903fb,32917,1731457759225/cb28580903fb%2C32917%2C1731457759225.1731457759520 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-13T00:31:24,976 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,41233,1731457759323/cb28580903fb%2C41233%2C1731457759323.meta.1731457760430.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:31:24,976 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,35015,1731457760593/cb28580903fb%2C35015%2C1731457760593.1731457760827 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:31:25,712 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/MasterData/WALs/cb28580903fb,32917,1731457759225/cb28580903fb%2C32917%2C1731457759225.1731457759520 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-13T00:31:25,977 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,41233,1731457759323/cb28580903fb%2C41233%2C1731457759323.meta.1731457760430.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:31:25,977 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,35015,1731457760593/cb28580903fb%2C35015%2C1731457760593.1731457760827 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more
2024-11-13T00:31:26,079 INFO [master/cb28580903fb:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker!
2024-11-13T00:31:26,079 INFO [master/cb28580903fb:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore!
2024-11-13T00:31:26,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33675 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=11
2024-11-13T00:31:26,276 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed
2024-11-13T00:31:26,276 DEBUG [Time-limited test {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-11-13T00:31:26,277 DEBUG [Time-limited test {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 18099 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-11-13T00:31:26,277 DEBUG [Time-limited test {}] regionserver.HStore(1541): 77c503dfbc138df345484c11abcb0eb4/info is initiating minor compaction (all files)
2024-11-13T00:31:26,277 INFO [Time-limited test {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms
2024-11-13T00:31:26,277 INFO [Time-limited test {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled.
2024-11-13T00:31:26,277 INFO [Time-limited test {}] regionserver.HRegion(2416): Starting compaction of 77c503dfbc138df345484c11abcb0eb4/info in TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731457846101.77c503dfbc138df345484c11abcb0eb4.
2024-11-13T00:31:26,277 INFO [Time-limited test {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39533/user/jenkins/test-data/f0421069-58d7-d4ce-1e56-b679fee0e922/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/77c503dfbc138df345484c11abcb0eb4/info/c7f04cb3b9a24e7e9d04f5b8d214fe8f, hdfs://localhost:39533/user/jenkins/test-data/f0421069-58d7-d4ce-1e56-b679fee0e922/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/77c503dfbc138df345484c11abcb0eb4/info/212451240d734a708e6e972cf281aae2, hdfs://localhost:39533/user/jenkins/test-data/f0421069-58d7-d4ce-1e56-b679fee0e922/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/77c503dfbc138df345484c11abcb0eb4/info/95d3e46535da4779bb7c413bed086e01] into tmpdir=hdfs://localhost:39533/user/jenkins/test-data/f0421069-58d7-d4ce-1e56-b679fee0e922/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/77c503dfbc138df345484c11abcb0eb4/.tmp, totalSize=17.7 K
2024-11-13T00:31:26,278 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting c7f04cb3b9a24e7e9d04f5b8d214fe8f, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=5, earliestPutTs=1731457856123
2024-11-13T00:31:26,278 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting 212451240d734a708e6e972cf281aae2, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=9, earliestPutTs=1731457866176
2024-11-13T00:31:26,278 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting 95d3e46535da4779bb7c413bed086e01, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1731457876236
2024-11-13T00:31:26,288 INFO [Time-limited test {}] throttle.PressureAwareThroughputController(145): 77c503dfbc138df345484c11abcb0eb4#info#compaction#47 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms.
0 active operations remaining, total limit is 50.00 MB/second
2024-11-13T00:31:26,289 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39533/user/jenkins/test-data/f0421069-58d7-d4ce-1e56-b679fee0e922/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/77c503dfbc138df345484c11abcb0eb4/.tmp/info/476de2ceb5cc45139fe4d55b4b40b513 is 1080, key is row0001/info:/1731457856123/Put/seqid=0
2024-11-13T00:31:26,293 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46085 is added to blk_1073741841_1017 (size=8296)
2024-11-13T00:31:26,293 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38553 is added to blk_1073741841_1017 (size=8296)
2024-11-13T00:31:26,298 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39533/user/jenkins/test-data/f0421069-58d7-d4ce-1e56-b679fee0e922/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/77c503dfbc138df345484c11abcb0eb4/.tmp/info/476de2ceb5cc45139fe4d55b4b40b513 as hdfs://localhost:39533/user/jenkins/test-data/f0421069-58d7-d4ce-1e56-b679fee0e922/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/77c503dfbc138df345484c11abcb0eb4/info/476de2ceb5cc45139fe4d55b4b40b513
2024-11-13T00:31:26,304 INFO [Time-limited test {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 77c503dfbc138df345484c11abcb0eb4/info of 77c503dfbc138df345484c11abcb0eb4 into 476de2ceb5cc45139fe4d55b4b40b513(size=8.1 K), total size for store is 8.1 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-11-13T00:31:26,304 DEBUG [Time-limited test {}] regionserver.HRegion(2446): Compaction status journal for 77c503dfbc138df345484c11abcb0eb4:
2024-11-13T00:31:26,307 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor cb28580903fb%2C35993%2C1731457845270.1731457886307
2024-11-13T00:31:26,312 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-13T00:31:26,312 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-13T00:31:26,312 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-13T00:31:26,312 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-13T00:31:26,312 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-13T00:31:26,312 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/f0421069-58d7-d4ce-1e56-b679fee0e922/WALs/cb28580903fb,35993,1731457845270/cb28580903fb%2C35993%2C1731457845270.1731457876238 with entries=4, filesize=2.45 KB; new WAL /user/jenkins/test-data/f0421069-58d7-d4ce-1e56-b679fee0e922/WALs/cb28580903fb,35993,1731457845270/cb28580903fb%2C35993%2C1731457845270.1731457886307
2024-11-13T00:31:26,314 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38553 is added to blk_1073741839_1015 (size=2520)
2024-11-13T00:31:26,314 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46085 is added to blk_1073741839_1015 (size=2520)
2024-11-13T00:31:26,314 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41921:41921),(127.0.0.1/127.0.0.1:41235:41235)]
2024-11-13T00:31:26,315 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving
hdfs://localhost:39533/user/jenkins/test-data/f0421069-58d7-d4ce-1e56-b679fee0e922/WALs/cb28580903fb,35993,1731457845270/cb28580903fb%2C35993%2C1731457845270.1731457845651 to hdfs://localhost:39533/user/jenkins/test-data/f0421069-58d7-d4ce-1e56-b679fee0e922/oldWALs/cb28580903fb%2C35993%2C1731457845270.1731457845651
2024-11-13T00:31:26,315 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33675 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-13T00:31:26,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33675 {}] procedure2.ProcedureExecutor(1139): Stored pid=13, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-13T00:31:26,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33675 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=13
2024-11-13T00:31:26,318 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=13, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE
2024-11-13T00:31:26,318 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=13, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-11-13T00:31:26,318 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=14, ppid=13, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-11-13T00:31:26,471 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35993 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=14
2024-11-13T00:31:26,471 DEBUG [RS_FLUSH_OPERATIONS-regionserver/cb28580903fb:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731457846101.77c503dfbc138df345484c11abcb0eb4.
2024-11-13T00:31:26,472 INFO [RS_FLUSH_OPERATIONS-regionserver/cb28580903fb:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(2902): Flushing 77c503dfbc138df345484c11abcb0eb4 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB
2024-11-13T00:31:26,476 DEBUG [RS_FLUSH_OPERATIONS-regionserver/cb28580903fb:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39533/user/jenkins/test-data/f0421069-58d7-d4ce-1e56-b679fee0e922/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/77c503dfbc138df345484c11abcb0eb4/.tmp/info/85d308a4d3034c85bd2312288679bb68 is 1080, key is row0000/info:/1731457886305/Put/seqid=0
2024-11-13T00:31:26,480 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46085 is added to blk_1073741843_1019 (size=6033)
2024-11-13T00:31:26,480 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38553 is added to blk_1073741843_1019 (size=6033)
2024-11-13T00:31:26,481 INFO [RS_FLUSH_OPERATIONS-regionserver/cb28580903fb:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=18 (bloomFilter=true), to=hdfs://localhost:39533/user/jenkins/test-data/f0421069-58d7-d4ce-1e56-b679fee0e922/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/77c503dfbc138df345484c11abcb0eb4/.tmp/info/85d308a4d3034c85bd2312288679bb68
2024-11-13T00:31:26,486 DEBUG [RS_FLUSH_OPERATIONS-regionserver/cb28580903fb:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39533/user/jenkins/test-data/f0421069-58d7-d4ce-1e56-b679fee0e922/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/77c503dfbc138df345484c11abcb0eb4/.tmp/info/85d308a4d3034c85bd2312288679bb68 as hdfs://localhost:39533/user/jenkins/test-data/f0421069-58d7-d4ce-1e56-b679fee0e922/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/77c503dfbc138df345484c11abcb0eb4/info/85d308a4d3034c85bd2312288679bb68
2024-11-13T00:31:26,491 INFO [RS_FLUSH_OPERATIONS-regionserver/cb28580903fb:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39533/user/jenkins/test-data/f0421069-58d7-d4ce-1e56-b679fee0e922/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/77c503dfbc138df345484c11abcb0eb4/info/85d308a4d3034c85bd2312288679bb68, entries=1, sequenceid=18, filesize=5.9 K
2024-11-13T00:31:26,492 INFO [RS_FLUSH_OPERATIONS-regionserver/cb28580903fb:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 77c503dfbc138df345484c11abcb0eb4 in 21ms, sequenceid=18, compaction requested=false
2024-11-13T00:31:26,492 DEBUG [RS_FLUSH_OPERATIONS-regionserver/cb28580903fb:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(2603): Flush status journal for 77c503dfbc138df345484c11abcb0eb4:
2024-11-13T00:31:26,492 DEBUG [RS_FLUSH_OPERATIONS-regionserver/cb28580903fb:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731457846101.77c503dfbc138df345484c11abcb0eb4.
2024-11-13T00:31:26,492 DEBUG [RS_FLUSH_OPERATIONS-regionserver/cb28580903fb:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=14
2024-11-13T00:31:26,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33675 {}] master.HMaster(4169): Remote procedure done, pid=14
2024-11-13T00:31:26,496 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=14, resume processing ppid=13
2024-11-13T00:31:26,496 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=14, ppid=13, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 176 msec
2024-11-13T00:31:26,498 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=13, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 182 msec
2024-11-13T00:31:26,712 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/MasterData/WALs/cb28580903fb,32917,1731457759225/cb28580903fb%2C32917%2C1731457759225.1731457759520 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ...
11 more 2024-11-13T00:31:26,977 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,41233,1731457759323/cb28580903fb%2C41233%2C1731457759323.meta.1731457760430.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:31:26,977 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,35015,1731457760593/cb28580903fb%2C35015%2C1731457760593.1731457760827 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:31:27,713 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/MasterData/WALs/cb28580903fb,32917,1731457759225/cb28580903fb%2C32917%2C1731457759225.1731457759520 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-13T00:31:27,978 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,41233,1731457759323/cb28580903fb%2C41233%2C1731457759323.meta.1731457760430.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:31:27,978 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,35015,1731457760593/cb28580903fb%2C35015%2C1731457760593.1731457760827 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:31:28,713 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/MasterData/WALs/cb28580903fb,32917,1731457759225/cb28580903fb%2C32917%2C1731457759225.1731457759520 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-13T00:31:28,978 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,41233,1731457759323/cb28580903fb%2C41233%2C1731457759323.meta.1731457760430.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:31:28,978 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,35015,1731457760593/cb28580903fb%2C35015%2C1731457760593.1731457760827 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:31:29,714 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/MasterData/WALs/cb28580903fb,32917,1731457759225/cb28580903fb%2C32917%2C1731457759225.1731457759520 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-13T00:31:29,979 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,41233,1731457759323/cb28580903fb%2C41233%2C1731457759323.meta.1731457760430.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:31:29,979 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,35015,1731457760593/cb28580903fb%2C35015%2C1731457760593.1731457760827 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:31:30,714 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/MasterData/WALs/cb28580903fb,32917,1731457759225/cb28580903fb%2C32917%2C1731457759225.1731457759520 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-13T00:31:30,979 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,41233,1731457759323/cb28580903fb%2C41233%2C1731457759323.meta.1731457760430.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:31:30,979 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,35015,1731457760593/cb28580903fb%2C35015%2C1731457760593.1731457760827 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:31:31,440 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 77c503dfbc138df345484c11abcb0eb4, had cached 0 bytes from a total of 14329 2024-11-13T00:31:31,715 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/MasterData/WALs/cb28580903fb,32917,1731457759225/cb28580903fb%2C32917%2C1731457759225.1731457759520 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:31:31,980 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,41233,1731457759323/cb28580903fb%2C41233%2C1731457759323.meta.1731457760430.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:31:31,980 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,35015,1731457760593/cb28580903fb%2C35015%2C1731457760593.1731457760827 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:31:32,715 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/MasterData/WALs/cb28580903fb,32917,1731457759225/cb28580903fb%2C32917%2C1731457759225.1731457759520 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:31:32,980 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,41233,1731457759323/cb28580903fb%2C41233%2C1731457759323.meta.1731457760430.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:31:32,980 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,35015,1731457760593/cb28580903fb%2C35015%2C1731457760593.1731457760827 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:31:33,716 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/MasterData/WALs/cb28580903fb,32917,1731457759225/cb28580903fb%2C32917%2C1731457759225.1731457759520 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:31:33,981 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,35015,1731457760593/cb28580903fb%2C35015%2C1731457760593.1731457760827 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:31:33,981 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,41233,1731457759323/cb28580903fb%2C41233%2C1731457759323.meta.1731457760430.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:31:34,716 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/MasterData/WALs/cb28580903fb,32917,1731457759225/cb28580903fb%2C32917%2C1731457759225.1731457759520 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:31:34,982 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,35015,1731457760593/cb28580903fb%2C35015%2C1731457760593.1731457760827 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:31:34,982 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,41233,1731457759323/cb28580903fb%2C41233%2C1731457759323.meta.1731457760430.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:31:35,717 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/MasterData/WALs/cb28580903fb,32917,1731457759225/cb28580903fb%2C32917%2C1731457759225.1731457759520 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:31:35,982 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,35015,1731457760593/cb28580903fb%2C35015%2C1731457760593.1731457760827 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:31:35,982 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,41233,1731457759323/cb28580903fb%2C41233%2C1731457759323.meta.1731457760430.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:31:36,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33675 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=13 2024-11-13T00:31:36,405 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-11-13T00:31:36,408 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor cb28580903fb%2C35993%2C1731457845270.1731457896408 2024-11-13T00:31:36,413 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:31:36,413 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:31:36,413 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:31:36,414 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:31:36,414 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:31:36,414 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/f0421069-58d7-d4ce-1e56-b679fee0e922/WALs/cb28580903fb,35993,1731457845270/cb28580903fb%2C35993%2C1731457845270.1731457886307 with entries=3, filesize=1.97 KB; new WAL /user/jenkins/test-data/f0421069-58d7-d4ce-1e56-b679fee0e922/WALs/cb28580903fb,35993,1731457845270/cb28580903fb%2C35993%2C1731457845270.1731457896408 2024-11-13T00:31:36,414 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41235:41235),(127.0.0.1/127.0.0.1:41921:41921)] 2024-11-13T00:31:36,414 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:39533/user/jenkins/test-data/f0421069-58d7-d4ce-1e56-b679fee0e922/WALs/cb28580903fb,35993,1731457845270/cb28580903fb%2C35993%2C1731457845270.1731457886307 is not closed yet, will 
try archiving it next time 2024-11-13T00:31:36,415 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:39533/user/jenkins/test-data/f0421069-58d7-d4ce-1e56-b679fee0e922/WALs/cb28580903fb,35993,1731457845270/cb28580903fb%2C35993%2C1731457845270.1731457876238 to hdfs://localhost:39533/user/jenkins/test-data/f0421069-58d7-d4ce-1e56-b679fee0e922/oldWALs/cb28580903fb%2C35993%2C1731457845270.1731457876238 2024-11-13T00:31:36,415 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-13T00:31:36,415 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-13T00:31:36,415 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at 
org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-13T00:31:36,415 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T00:31:36,415 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T00:31:36,415 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38553 is added to blk_1073741842_1018 (size=2026) 2024-11-13T00:31:36,415 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-13T00:31:36,415 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-13T00:31:36,415 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=380587052, stopped=false 2024-11-13T00:31:36,415 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=cb28580903fb,33675,1731457845226 2024-11-13T00:31:36,416 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46085 is added to blk_1073741842_1018 (size=2026) 2024-11-13T00:31:36,417 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35993-0x1010ebbf26d0001, quorum=127.0.0.1:58448, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-13T00:31:36,417 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33675-0x1010ebbf26d0000, quorum=127.0.0.1:58448, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-13T00:31:36,417 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35993-0x1010ebbf26d0001, quorum=127.0.0.1:58448, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T00:31:36,417 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-13T00:31:36,417 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33675-0x1010ebbf26d0000, quorum=127.0.0.1:58448, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T00:31:36,417 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-13T00:31:36,417 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-13T00:31:36,417 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T00:31:36,418 INFO [Time-limited test {}] 
regionserver.HRegionServer(2196): ***** STOPPING region server 'cb28580903fb,35993,1731457845270' ***** 2024-11-13T00:31:36,418 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-13T00:31:36,418 INFO [RS:0;cb28580903fb:35993 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-13T00:31:36,418 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:35993-0x1010ebbf26d0001, quorum=127.0.0.1:58448, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-13T00:31:36,418 INFO [RS:0;cb28580903fb:35993 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-13T00:31:36,418 INFO [RS:0;cb28580903fb:35993 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-13T00:31:36,418 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-13T00:31:36,418 INFO [RS:0;cb28580903fb:35993 {}] regionserver.HRegionServer(3091): Received CLOSE for 77c503dfbc138df345484c11abcb0eb4 2024-11-13T00:31:36,418 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:33675-0x1010ebbf26d0000, quorum=127.0.0.1:58448, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-13T00:31:36,419 INFO [RS:0;cb28580903fb:35993 {}] regionserver.HRegionServer(959): stopping server cb28580903fb,35993,1731457845270 2024-11-13T00:31:36,419 INFO [RS:0;cb28580903fb:35993 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-13T00:31:36,419 INFO [RS:0;cb28580903fb:35993 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;cb28580903fb:35993. 2024-11-13T00:31:36,419 DEBUG [RS:0;cb28580903fb:35993 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-13T00:31:36,419 DEBUG [RS:0;cb28580903fb:35993 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T00:31:36,419 DEBUG [RS_CLOSE_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 77c503dfbc138df345484c11abcb0eb4, disabling compactions & flushes 2024-11-13T00:31:36,419 INFO 
[RS_CLOSE_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731457846101.77c503dfbc138df345484c11abcb0eb4. 2024-11-13T00:31:36,419 INFO [RS:0;cb28580903fb:35993 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-13T00:31:36,419 DEBUG [RS_CLOSE_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731457846101.77c503dfbc138df345484c11abcb0eb4. 2024-11-13T00:31:36,419 INFO [RS:0;cb28580903fb:35993 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-13T00:31:36,419 INFO [RS:0;cb28580903fb:35993 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-13T00:31:36,419 DEBUG [RS_CLOSE_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731457846101.77c503dfbc138df345484c11abcb0eb4. after waiting 0 ms 2024-11-13T00:31:36,419 DEBUG [RS_CLOSE_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731457846101.77c503dfbc138df345484c11abcb0eb4. 2024-11-13T00:31:36,419 INFO [RS:0;cb28580903fb:35993 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-13T00:31:36,419 INFO [RS_CLOSE_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing 77c503dfbc138df345484c11abcb0eb4 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-13T00:31:36,419 INFO [RS:0;cb28580903fb:35993 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-13T00:31:36,419 DEBUG [RS:0;cb28580903fb:35993 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, 77c503dfbc138df345484c11abcb0eb4=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731457846101.77c503dfbc138df345484c11abcb0eb4.} 2024-11-13T00:31:36,419 DEBUG [RS:0;cb28580903fb:35993 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 77c503dfbc138df345484c11abcb0eb4 2024-11-13T00:31:36,419 DEBUG [RS_CLOSE_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-13T00:31:36,419 INFO [RS_CLOSE_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-13T00:31:36,419 DEBUG [RS_CLOSE_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-13T00:31:36,420 DEBUG [RS_CLOSE_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-13T00:31:36,420 DEBUG [RS_CLOSE_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-13T00:31:36,420 INFO [RS_CLOSE_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.89 KB heapSize=3.91 KB 2024-11-13T00:31:36,423 DEBUG 
[RS_CLOSE_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39533/user/jenkins/test-data/f0421069-58d7-d4ce-1e56-b679fee0e922/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/77c503dfbc138df345484c11abcb0eb4/.tmp/info/f53cbc707bef461889b6593d5f230fba is 1080, key is row0001/info:/1731457896406/Put/seqid=0 2024-11-13T00:31:36,428 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46085 is added to blk_1073741845_1021 (size=6033) 2024-11-13T00:31:36,428 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38553 is added to blk_1073741845_1021 (size=6033) 2024-11-13T00:31:36,428 INFO [RS_CLOSE_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=22 (bloomFilter=true), to=hdfs://localhost:39533/user/jenkins/test-data/f0421069-58d7-d4ce-1e56-b679fee0e922/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/77c503dfbc138df345484c11abcb0eb4/.tmp/info/f53cbc707bef461889b6593d5f230fba 2024-11-13T00:31:36,434 DEBUG [RS_CLOSE_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39533/user/jenkins/test-data/f0421069-58d7-d4ce-1e56-b679fee0e922/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/77c503dfbc138df345484c11abcb0eb4/.tmp/info/f53cbc707bef461889b6593d5f230fba as hdfs://localhost:39533/user/jenkins/test-data/f0421069-58d7-d4ce-1e56-b679fee0e922/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/77c503dfbc138df345484c11abcb0eb4/info/f53cbc707bef461889b6593d5f230fba 2024-11-13T00:31:36,438 INFO [RS_CLOSE_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39533/user/jenkins/test-data/f0421069-58d7-d4ce-1e56-b679fee0e922/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/77c503dfbc138df345484c11abcb0eb4/info/f53cbc707bef461889b6593d5f230fba, entries=1, sequenceid=22, filesize=5.9 K 2024-11-13T00:31:36,438 DEBUG [RS_CLOSE_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39533/user/jenkins/test-data/f0421069-58d7-d4ce-1e56-b679fee0e922/data/hbase/meta/1588230740/.tmp/info/a8596640d1514c5a8325b3057158c226 is 227, key is TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731457846101.77c503dfbc138df345484c11abcb0eb4./info:regioninfo/1731457846452/Put/seqid=0 2024-11-13T00:31:36,440 INFO [RS_CLOSE_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 77c503dfbc138df345484c11abcb0eb4 in 20ms, sequenceid=22, compaction requested=true 2024-11-13T00:31:36,440 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731457846101.77c503dfbc138df345484c11abcb0eb4.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:39533/user/jenkins/test-data/f0421069-58d7-d4ce-1e56-b679fee0e922/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/77c503dfbc138df345484c11abcb0eb4/info/c7f04cb3b9a24e7e9d04f5b8d214fe8f, 
hdfs://localhost:39533/user/jenkins/test-data/f0421069-58d7-d4ce-1e56-b679fee0e922/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/77c503dfbc138df345484c11abcb0eb4/info/212451240d734a708e6e972cf281aae2, hdfs://localhost:39533/user/jenkins/test-data/f0421069-58d7-d4ce-1e56-b679fee0e922/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/77c503dfbc138df345484c11abcb0eb4/info/95d3e46535da4779bb7c413bed086e01] to archive 2024-11-13T00:31:36,441 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731457846101.77c503dfbc138df345484c11abcb0eb4.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-13T00:31:36,443 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731457846101.77c503dfbc138df345484c11abcb0eb4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39533/user/jenkins/test-data/f0421069-58d7-d4ce-1e56-b679fee0e922/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/77c503dfbc138df345484c11abcb0eb4/info/c7f04cb3b9a24e7e9d04f5b8d214fe8f to hdfs://localhost:39533/user/jenkins/test-data/f0421069-58d7-d4ce-1e56-b679fee0e922/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/77c503dfbc138df345484c11abcb0eb4/info/c7f04cb3b9a24e7e9d04f5b8d214fe8f 2024-11-13T00:31:36,444 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731457846101.77c503dfbc138df345484c11abcb0eb4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39533/user/jenkins/test-data/f0421069-58d7-d4ce-1e56-b679fee0e922/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/77c503dfbc138df345484c11abcb0eb4/info/212451240d734a708e6e972cf281aae2 to hdfs://localhost:39533/user/jenkins/test-data/f0421069-58d7-d4ce-1e56-b679fee0e922/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/77c503dfbc138df345484c11abcb0eb4/info/212451240d734a708e6e972cf281aae2 2024-11-13T00:31:36,445 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46085 is added to blk_1073741846_1022 (size=7308) 2024-11-13T00:31:36,446 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38553 is added to blk_1073741846_1022 (size=7308) 2024-11-13T00:31:36,446 INFO [RS_CLOSE_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.65 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:39533/user/jenkins/test-data/f0421069-58d7-d4ce-1e56-b679fee0e922/data/hbase/meta/1588230740/.tmp/info/a8596640d1514c5a8325b3057158c226 2024-11-13T00:31:36,448 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731457846101.77c503dfbc138df345484c11abcb0eb4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39533/user/jenkins/test-data/f0421069-58d7-d4ce-1e56-b679fee0e922/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/77c503dfbc138df345484c11abcb0eb4/info/95d3e46535da4779bb7c413bed086e01 to hdfs://localhost:39533/user/jenkins/test-data/f0421069-58d7-d4ce-1e56-b679fee0e922/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/77c503dfbc138df345484c11abcb0eb4/info/95d3e46535da4779bb7c413bed086e01 2024-11-13T00:31:36,449 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731457846101.77c503dfbc138df345484c11abcb0eb4.-1 {}] 
regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=cb28580903fb:33675 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 16 more 2024-11-13T00:31:36,449 WARN [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731457846101.77c503dfbc138df345484c11abcb0eb4.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [c7f04cb3b9a24e7e9d04f5b8d214fe8f=6033, 212451240d734a708e6e972cf281aae2=6033, 95d3e46535da4779bb7c413bed086e01=6033] 2024-11-13T00:31:36,453 DEBUG [RS_CLOSE_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39533/user/jenkins/test-data/f0421069-58d7-d4ce-1e56-b679fee0e922/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/77c503dfbc138df345484c11abcb0eb4/recovered.edits/25.seqid, newMaxSeqId=25, maxSeqId=1 2024-11-13T00:31:36,453 INFO [RS_CLOSE_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731457846101.77c503dfbc138df345484c11abcb0eb4. 2024-11-13T00:31:36,453 DEBUG [RS_CLOSE_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 77c503dfbc138df345484c11abcb0eb4: Waiting for close lock at 1731457896419Running coprocessor pre-close hooks at 1731457896419Disabling compacts and flushes for region at 1731457896419Disabling writes for close at 1731457896419Obtaining lock to block concurrent updates at 1731457896419Preparing flush snapshotting stores in 77c503dfbc138df345484c11abcb0eb4 at 1731457896419Finished memstore snapshotting TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731457846101.77c503dfbc138df345484c11abcb0eb4., syncing WAL and waiting on mvcc, flushsize=dataSize=1076, getHeapSize=1392, getOffHeapSize=0, getCellsCount=1 at 1731457896419Flushing stores of TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731457846101.77c503dfbc138df345484c11abcb0eb4. at 1731457896420 (+1 ms)Flushing 77c503dfbc138df345484c11abcb0eb4/info: creating writer at 1731457896420Flushing 77c503dfbc138df345484c11abcb0eb4/info: appending metadata at 1731457896423 (+3 ms)Flushing 77c503dfbc138df345484c11abcb0eb4/info: closing flushed file at 1731457896423Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@361367f4: reopening flushed file at 1731457896433 (+10 ms)Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 77c503dfbc138df345484c11abcb0eb4 in 20ms, sequenceid=22, compaction requested=true at 1731457896440 (+7 ms)Writing region close event to WAL at 1731457896450 (+10 ms)Running coprocessor post-close hooks at 1731457896453 (+3 ms)Closed at 1731457896453 2024-11-13T00:31:36,454 DEBUG [RS_CLOSE_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731457846101.77c503dfbc138df345484c11abcb0eb4. 
2024-11-13T00:31:36,466 DEBUG [RS_CLOSE_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39533/user/jenkins/test-data/f0421069-58d7-d4ce-1e56-b679fee0e922/data/hbase/meta/1588230740/.tmp/ns/8e7024e80c3846b0a046e20582494664 is 43, key is default/ns:d/1731457846066/Put/seqid=0 2024-11-13T00:31:36,470 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46085 is added to blk_1073741847_1023 (size=5153) 2024-11-13T00:31:36,470 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38553 is added to blk_1073741847_1023 (size=5153) 2024-11-13T00:31:36,470 INFO [RS_CLOSE_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:39533/user/jenkins/test-data/f0421069-58d7-d4ce-1e56-b679fee0e922/data/hbase/meta/1588230740/.tmp/ns/8e7024e80c3846b0a046e20582494664 2024-11-13T00:31:36,493 DEBUG [RS_CLOSE_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39533/user/jenkins/test-data/f0421069-58d7-d4ce-1e56-b679fee0e922/data/hbase/meta/1588230740/.tmp/table/7a3d571aa29b42d3876dd94194c38d79 is 89, key is TestLogRolling-testCompactionRecordDoesntBlockRolling/table:state/1731457846461/Put/seqid=0 2024-11-13T00:31:36,497 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38553 is added to blk_1073741848_1024 (size=5508) 2024-11-13T00:31:36,497 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46085 is added to blk_1073741848_1024 (size=5508) 2024-11-13T00:31:36,497 INFO [RS_CLOSE_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=170 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:39533/user/jenkins/test-data/f0421069-58d7-d4ce-1e56-b679fee0e922/data/hbase/meta/1588230740/.tmp/table/7a3d571aa29b42d3876dd94194c38d79 2024-11-13T00:31:36,502 DEBUG [RS_CLOSE_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39533/user/jenkins/test-data/f0421069-58d7-d4ce-1e56-b679fee0e922/data/hbase/meta/1588230740/.tmp/info/a8596640d1514c5a8325b3057158c226 as hdfs://localhost:39533/user/jenkins/test-data/f0421069-58d7-d4ce-1e56-b679fee0e922/data/hbase/meta/1588230740/info/a8596640d1514c5a8325b3057158c226 2024-11-13T00:31:36,507 INFO [RS_CLOSE_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39533/user/jenkins/test-data/f0421069-58d7-d4ce-1e56-b679fee0e922/data/hbase/meta/1588230740/info/a8596640d1514c5a8325b3057158c226, entries=10, sequenceid=11, filesize=7.1 K 2024-11-13T00:31:36,508 DEBUG [RS_CLOSE_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39533/user/jenkins/test-data/f0421069-58d7-d4ce-1e56-b679fee0e922/data/hbase/meta/1588230740/.tmp/ns/8e7024e80c3846b0a046e20582494664 as hdfs://localhost:39533/user/jenkins/test-data/f0421069-58d7-d4ce-1e56-b679fee0e922/data/hbase/meta/1588230740/ns/8e7024e80c3846b0a046e20582494664 2024-11-13T00:31:36,512 INFO [RS_CLOSE_META-regionserver/cb28580903fb:0-0 
{event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39533/user/jenkins/test-data/f0421069-58d7-d4ce-1e56-b679fee0e922/data/hbase/meta/1588230740/ns/8e7024e80c3846b0a046e20582494664, entries=2, sequenceid=11, filesize=5.0 K 2024-11-13T00:31:36,513 DEBUG [RS_CLOSE_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39533/user/jenkins/test-data/f0421069-58d7-d4ce-1e56-b679fee0e922/data/hbase/meta/1588230740/.tmp/table/7a3d571aa29b42d3876dd94194c38d79 as hdfs://localhost:39533/user/jenkins/test-data/f0421069-58d7-d4ce-1e56-b679fee0e922/data/hbase/meta/1588230740/table/7a3d571aa29b42d3876dd94194c38d79 2024-11-13T00:31:36,518 INFO [RS_CLOSE_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39533/user/jenkins/test-data/f0421069-58d7-d4ce-1e56-b679fee0e922/data/hbase/meta/1588230740/table/7a3d571aa29b42d3876dd94194c38d79, entries=2, sequenceid=11, filesize=5.4 K 2024-11-13T00:31:36,518 INFO [regionserver/cb28580903fb:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-13T00:31:36,518 INFO [regionserver/cb28580903fb:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-13T00:31:36,519 INFO [RS_CLOSE_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.89 KB/1932, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 1588230740 in 99ms, sequenceid=11, compaction requested=false 2024-11-13T00:31:36,523 DEBUG [RS_CLOSE_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39533/user/jenkins/test-data/f0421069-58d7-d4ce-1e56-b679fee0e922/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-13T00:31:36,524 DEBUG [RS_CLOSE_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-13T00:31:36,524 INFO [RS_CLOSE_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-13T00:31:36,524 DEBUG [RS_CLOSE_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731457896419Running coprocessor pre-close hooks at 1731457896419Disabling compacts and flushes for region at 1731457896419Disabling writes for close at 1731457896420 (+1 ms)Obtaining lock to block concurrent updates at 1731457896420Preparing flush snapshotting stores in 1588230740 at 1731457896420Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1932, getHeapSize=3936, getOffHeapSize=0, getCellsCount=14 at 1731457896420Flushing stores of hbase:meta,,1.1588230740 at 1731457896421 (+1 ms)Flushing 1588230740/info: creating writer at 1731457896421Flushing 1588230740/info: appending metadata at 1731457896438 (+17 ms)Flushing 1588230740/info: closing flushed file at 1731457896438Flushing 1588230740/ns: creating writer at 1731457896451 (+13 ms)Flushing 1588230740/ns: appending metadata at 1731457896465 (+14 ms)Flushing 1588230740/ns: closing flushed file at 1731457896465Flushing 1588230740/table: creating writer at 1731457896475 (+10 ms)Flushing 1588230740/table: appending metadata 
at 1731457896492 (+17 ms)Flushing 1588230740/table: closing flushed file at 1731457896492Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@549f2c05: reopening flushed file at 1731457896502 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@74ba21f6: reopening flushed file at 1731457896507 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6e1e2ced: reopening flushed file at 1731457896512 (+5 ms)Finished flush of dataSize ~1.89 KB/1932, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 1588230740 in 99ms, sequenceid=11, compaction requested=false at 1731457896519 (+7 ms)Writing region close event to WAL at 1731457896520 (+1 ms)Running coprocessor post-close hooks at 1731457896524 (+4 ms)Closed at 1731457896524 2024-11-13T00:31:36,524 DEBUG [RS_CLOSE_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-13T00:31:36,619 INFO [RS:0;cb28580903fb:35993 {}] regionserver.HRegionServer(976): stopping server cb28580903fb,35993,1731457845270; all regions closed. 2024-11-13T00:31:36,620 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:31:36,620 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:31:36,620 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:31:36,620 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:31:36,620 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:31:36,623 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38553 is added to blk_1073741834_1010 (size=3306) 2024-11-13T00:31:36,623 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46085 is added to blk_1073741834_1010 (size=3306) 2024-11-13T00:31:36,625 DEBUG [RS:0;cb28580903fb:35993 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/f0421069-58d7-d4ce-1e56-b679fee0e922/oldWALs 2024-11-13T00:31:36,625 INFO [RS:0;cb28580903fb:35993 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog cb28580903fb%2C35993%2C1731457845270.meta:.meta(num 1731457846026) 2024-11-13T00:31:36,626 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:31:36,626 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:31:36,626 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:31:36,626 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:31:36,626 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:31:36,628 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46085 is added to blk_1073741844_1020 (size=1252) 2024-11-13T00:31:36,628 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38553 is added to blk_1073741844_1020 (size=1252) 2024-11-13T00:31:36,631 DEBUG [RS:0;cb28580903fb:35993 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/f0421069-58d7-d4ce-1e56-b679fee0e922/oldWALs 2024-11-13T00:31:36,631 INFO [RS:0;cb28580903fb:35993 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog cb28580903fb%2C35993%2C1731457845270:(num 1731457896408) 2024-11-13T00:31:36,631 DEBUG [RS:0;cb28580903fb:35993 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T00:31:36,631 INFO [RS:0;cb28580903fb:35993 {}] regionserver.LeaseManager(133): Closed leases 2024-11-13T00:31:36,631 INFO 
[RS:0;cb28580903fb:35993 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-13T00:31:36,631 INFO [RS:0;cb28580903fb:35993 {}] hbase.ChoreService(370): Chore service for: regionserver/cb28580903fb:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-13T00:31:36,631 INFO [RS:0;cb28580903fb:35993 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-13T00:31:36,631 INFO [regionserver/cb28580903fb:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-13T00:31:36,631 INFO [RS:0;cb28580903fb:35993 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:35993 2024-11-13T00:31:36,633 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35993-0x1010ebbf26d0001, quorum=127.0.0.1:58448, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/cb28580903fb,35993,1731457845270 2024-11-13T00:31:36,633 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33675-0x1010ebbf26d0000, quorum=127.0.0.1:58448, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-13T00:31:36,633 INFO [RS:0;cb28580903fb:35993 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-13T00:31:36,635 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [cb28580903fb,35993,1731457845270] 2024-11-13T00:31:36,637 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/cb28580903fb,35993,1731457845270 already deleted, retry=false 2024-11-13T00:31:36,637 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; cb28580903fb,35993,1731457845270 expired; onlineServers=0 2024-11-13T00:31:36,637 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'cb28580903fb,33675,1731457845226' ***** 2024-11-13T00:31:36,637 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-13T00:31:36,637 INFO [M:0;cb28580903fb:33675 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-13T00:31:36,637 INFO [M:0;cb28580903fb:33675 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-13T00:31:36,638 DEBUG [M:0;cb28580903fb:33675 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-13T00:31:36,638 DEBUG [M:0;cb28580903fb:33675 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-13T00:31:36,638 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-13T00:31:36,638 DEBUG [master/cb28580903fb:0:becomeActiveMaster-HFileCleaner.small.0-1731457845422 {}] cleaner.HFileCleaner(306): Exit Thread[master/cb28580903fb:0:becomeActiveMaster-HFileCleaner.small.0-1731457845422,5,FailOnTimeoutGroup] 2024-11-13T00:31:36,638 DEBUG [master/cb28580903fb:0:becomeActiveMaster-HFileCleaner.large.0-1731457845421 {}] cleaner.HFileCleaner(306): Exit Thread[master/cb28580903fb:0:becomeActiveMaster-HFileCleaner.large.0-1731457845421,5,FailOnTimeoutGroup] 2024-11-13T00:31:36,638 INFO [M:0;cb28580903fb:33675 {}] hbase.ChoreService(370): Chore service for: master/cb28580903fb:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-13T00:31:36,638 INFO [M:0;cb28580903fb:33675 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-13T00:31:36,638 DEBUG [M:0;cb28580903fb:33675 {}] master.HMaster(1795): Stopping service threads 2024-11-13T00:31:36,638 INFO [M:0;cb28580903fb:33675 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-13T00:31:36,638 INFO [M:0;cb28580903fb:33675 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-13T00:31:36,639 INFO [M:0;cb28580903fb:33675 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-13T00:31:36,639 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-13T00:31:36,639 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33675-0x1010ebbf26d0000, quorum=127.0.0.1:58448, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-13T00:31:36,639 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33675-0x1010ebbf26d0000, quorum=127.0.0.1:58448, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T00:31:36,639 DEBUG [M:0;cb28580903fb:33675 {}] zookeeper.ZKUtil(347): master:33675-0x1010ebbf26d0000, quorum=127.0.0.1:58448, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-13T00:31:36,639 WARN [M:0;cb28580903fb:33675 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-13T00:31:36,640 INFO [M:0;cb28580903fb:33675 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:39533/user/jenkins/test-data/f0421069-58d7-d4ce-1e56-b679fee0e922/.lastflushedseqids 2024-11-13T00:31:36,645 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38553 is added to blk_1073741849_1025 (size=130) 2024-11-13T00:31:36,645 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46085 is added to blk_1073741849_1025 (size=130) 2024-11-13T00:31:36,645 INFO [M:0;cb28580903fb:33675 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-13T00:31:36,646 INFO [M:0;cb28580903fb:33675 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-13T00:31:36,646 DEBUG [M:0;cb28580903fb:33675 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-13T00:31:36,646 INFO [M:0;cb28580903fb:33675 {}] 
regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-13T00:31:36,646 DEBUG [M:0;cb28580903fb:33675 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-13T00:31:36,646 DEBUG [M:0;cb28580903fb:33675 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-13T00:31:36,646 DEBUG [M:0;cb28580903fb:33675 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-13T00:31:36,646 INFO [M:0;cb28580903fb:33675 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=43.60 KB heapSize=55.01 KB 2024-11-13T00:31:36,662 DEBUG [M:0;cb28580903fb:33675 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39533/user/jenkins/test-data/f0421069-58d7-d4ce-1e56-b679fee0e922/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/100cfbdfa8eb4c67ac5f58daefdbde9b is 82, key is hbase:meta,,1/info:regioninfo/1731457846050/Put/seqid=0 2024-11-13T00:31:36,666 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46085 is added to blk_1073741850_1026 (size=5672) 2024-11-13T00:31:36,667 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38553 is added to blk_1073741850_1026 (size=5672) 2024-11-13T00:31:36,667 INFO [M:0;cb28580903fb:33675 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:39533/user/jenkins/test-data/f0421069-58d7-d4ce-1e56-b679fee0e922/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/100cfbdfa8eb4c67ac5f58daefdbde9b 2024-11-13T00:31:36,694 DEBUG [M:0;cb28580903fb:33675 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39533/user/jenkins/test-data/f0421069-58d7-d4ce-1e56-b679fee0e922/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/fca053fca8324ffca6d37d613ddffcc3 is 799, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731457846465/Put/seqid=0 2024-11-13T00:31:36,698 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46085 is added to blk_1073741851_1027 (size=7824) 2024-11-13T00:31:36,698 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38553 is added to blk_1073741851_1027 (size=7824) 2024-11-13T00:31:36,699 INFO [M:0;cb28580903fb:33675 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=43.00 KB at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:39533/user/jenkins/test-data/f0421069-58d7-d4ce-1e56-b679fee0e922/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/fca053fca8324ffca6d37d613ddffcc3 2024-11-13T00:31:36,703 INFO [M:0;cb28580903fb:33675 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for fca053fca8324ffca6d37d613ddffcc3 2024-11-13T00:31:36,717 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/MasterData/WALs/cb28580903fb,32917,1731457759225/cb28580903fb%2C32917%2C1731457759225.1731457759520 java.lang.reflect.InvocationTargetException: null at 
jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-13T00:31:36,718 DEBUG [M:0;cb28580903fb:33675 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39533/user/jenkins/test-data/f0421069-58d7-d4ce-1e56-b679fee0e922/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/cdcb3ff768d5442bb9f47a1e0e5f7531 is 69, key is cb28580903fb,35993,1731457845270/rs:state/1731457845506/Put/seqid=0 2024-11-13T00:31:36,722 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38553 is added to blk_1073741852_1028 (size=5156) 2024-11-13T00:31:36,723 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46085 is added to blk_1073741852_1028 (size=5156) 2024-11-13T00:31:36,723 INFO [M:0;cb28580903fb:33675 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:39533/user/jenkins/test-data/f0421069-58d7-d4ce-1e56-b679fee0e922/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/cdcb3ff768d5442bb9f47a1e0e5f7531 2024-11-13T00:31:36,735 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35993-0x1010ebbf26d0001, quorum=127.0.0.1:58448, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-13T00:31:36,735 INFO [RS:0;cb28580903fb:35993 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-13T00:31:36,735 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35993-0x1010ebbf26d0001, quorum=127.0.0.1:58448, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-13T00:31:36,735 INFO [RS:0;cb28580903fb:35993 {}] regionserver.HRegionServer(1031): Exiting; stopping=cb28580903fb,35993,1731457845270; zookeeper connection closed. 
2024-11-13T00:31:36,735 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@366151ce {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@366151ce 2024-11-13T00:31:36,735 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-13T00:31:36,741 DEBUG [M:0;cb28580903fb:33675 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39533/user/jenkins/test-data/f0421069-58d7-d4ce-1e56-b679fee0e922/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/31cce1f6c8524b70910335ff9554eb3d is 52, key is load_balancer_on/state:d/1731457846097/Put/seqid=0 2024-11-13T00:31:36,745 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38553 is added to blk_1073741853_1029 (size=5056) 2024-11-13T00:31:36,745 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46085 is added to blk_1073741853_1029 (size=5056) 2024-11-13T00:31:36,745 INFO [M:0;cb28580903fb:33675 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:39533/user/jenkins/test-data/f0421069-58d7-d4ce-1e56-b679fee0e922/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/31cce1f6c8524b70910335ff9554eb3d 2024-11-13T00:31:36,750 DEBUG [M:0;cb28580903fb:33675 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39533/user/jenkins/test-data/f0421069-58d7-d4ce-1e56-b679fee0e922/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/100cfbdfa8eb4c67ac5f58daefdbde9b as hdfs://localhost:39533/user/jenkins/test-data/f0421069-58d7-d4ce-1e56-b679fee0e922/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/100cfbdfa8eb4c67ac5f58daefdbde9b 2024-11-13T00:31:36,755 INFO [M:0;cb28580903fb:33675 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39533/user/jenkins/test-data/f0421069-58d7-d4ce-1e56-b679fee0e922/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/100cfbdfa8eb4c67ac5f58daefdbde9b, entries=8, sequenceid=121, filesize=5.5 K 2024-11-13T00:31:36,756 DEBUG [M:0;cb28580903fb:33675 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39533/user/jenkins/test-data/f0421069-58d7-d4ce-1e56-b679fee0e922/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/fca053fca8324ffca6d37d613ddffcc3 as hdfs://localhost:39533/user/jenkins/test-data/f0421069-58d7-d4ce-1e56-b679fee0e922/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/fca053fca8324ffca6d37d613ddffcc3 2024-11-13T00:31:36,761 INFO [M:0;cb28580903fb:33675 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for fca053fca8324ffca6d37d613ddffcc3 2024-11-13T00:31:36,761 INFO [M:0;cb28580903fb:33675 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39533/user/jenkins/test-data/f0421069-58d7-d4ce-1e56-b679fee0e922/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/fca053fca8324ffca6d37d613ddffcc3, entries=14, sequenceid=121, filesize=7.6 K 2024-11-13T00:31:36,762 DEBUG [M:0;cb28580903fb:33675 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39533/user/jenkins/test-data/f0421069-58d7-d4ce-1e56-b679fee0e922/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/cdcb3ff768d5442bb9f47a1e0e5f7531 as 
hdfs://localhost:39533/user/jenkins/test-data/f0421069-58d7-d4ce-1e56-b679fee0e922/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/cdcb3ff768d5442bb9f47a1e0e5f7531 2024-11-13T00:31:36,766 INFO [M:0;cb28580903fb:33675 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39533/user/jenkins/test-data/f0421069-58d7-d4ce-1e56-b679fee0e922/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/cdcb3ff768d5442bb9f47a1e0e5f7531, entries=1, sequenceid=121, filesize=5.0 K 2024-11-13T00:31:36,767 DEBUG [M:0;cb28580903fb:33675 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39533/user/jenkins/test-data/f0421069-58d7-d4ce-1e56-b679fee0e922/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/31cce1f6c8524b70910335ff9554eb3d as hdfs://localhost:39533/user/jenkins/test-data/f0421069-58d7-d4ce-1e56-b679fee0e922/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/31cce1f6c8524b70910335ff9554eb3d 2024-11-13T00:31:36,770 INFO [M:0;cb28580903fb:33675 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39533/user/jenkins/test-data/f0421069-58d7-d4ce-1e56-b679fee0e922/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/31cce1f6c8524b70910335ff9554eb3d, entries=1, sequenceid=121, filesize=4.9 K 2024-11-13T00:31:36,771 INFO [M:0;cb28580903fb:33675 {}] regionserver.HRegion(3140): Finished flush of dataSize ~43.60 KB/44650, heapSize ~54.95 KB/56264, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 125ms, sequenceid=121, compaction requested=false 2024-11-13T00:31:36,773 INFO [M:0;cb28580903fb:33675 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-13T00:31:36,773 DEBUG [M:0;cb28580903fb:33675 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731457896646Disabling compacts and flushes for region at 1731457896646Disabling writes for close at 1731457896646Obtaining lock to block concurrent updates at 1731457896646Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731457896646Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=44650, getHeapSize=56264, getOffHeapSize=0, getCellsCount=140 at 1731457896646Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1731457896647 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731457896647Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731457896661 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731457896661Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731457896672 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731457896693 (+21 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731457896693Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731457896703 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731457896717 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731457896717Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731457896727 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731457896740 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731457896741 (+1 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7ce47f12: reopening flushed file at 1731457896749 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4e1b39a9: reopening flushed file at 1731457896755 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5bb12453: reopening flushed file at 1731457896761 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4abdc856: reopening flushed file at 1731457896766 (+5 ms)Finished flush of dataSize ~43.60 KB/44650, heapSize ~54.95 KB/56264, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 125ms, sequenceid=121, compaction requested=false at 1731457896771 (+5 ms)Writing region close event to WAL at 1731457896773 (+2 ms)Closed at 1731457896773 2024-11-13T00:31:36,773 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:31:36,773 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:31:36,774 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:31:36,774 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:31:36,774 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:31:36,775 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46085 is added to blk_1073741830_1006 (size=53047) 2024-11-13T00:31:36,776 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38553 is added to blk_1073741830_1006 (size=53047) 2024-11-13T00:31:36,776 INFO [M:0;cb28580903fb:33675 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-13T00:31:36,776 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-13T00:31:36,776 INFO [M:0;cb28580903fb:33675 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:33675 2024-11-13T00:31:36,776 INFO [M:0;cb28580903fb:33675 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-13T00:31:36,879 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33675-0x1010ebbf26d0000, quorum=127.0.0.1:58448, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-13T00:31:36,879 INFO [M:0;cb28580903fb:33675 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-13T00:31:36,879 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33675-0x1010ebbf26d0000, quorum=127.0.0.1:58448, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-13T00:31:36,882 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@66db372d{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-13T00:31:36,882 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@7f06ed2{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-13T00:31:36,882 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-13T00:31:36,882 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6f2fd5ff{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-13T00:31:36,882 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7ee92539{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f7ff0268-c833-3a7c-dc93-16dfa0701c6a/hadoop.log.dir/,STOPPED} 2024-11-13T00:31:36,883 WARN [BP-1184649482-172.17.0.2-1731457844458 heartbeating to localhost/127.0.0.1:39533 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-13T00:31:36,883 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-13T00:31:36,884 WARN [BP-1184649482-172.17.0.2-1731457844458 heartbeating to localhost/127.0.0.1:39533 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1184649482-172.17.0.2-1731457844458 (Datanode Uuid 7766541c-f1e7-4073-9ed8-7d3188f3da13) service to localhost/127.0.0.1:39533 2024-11-13T00:31:36,884 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-13T00:31:36,884 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f7ff0268-c833-3a7c-dc93-16dfa0701c6a/cluster_cedab1ee-1ff8-0269-fdde-fcd88c748e68/data/data3/current/BP-1184649482-172.17.0.2-1731457844458 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-13T00:31:36,884 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f7ff0268-c833-3a7c-dc93-16dfa0701c6a/cluster_cedab1ee-1ff8-0269-fdde-fcd88c748e68/data/data4/current/BP-1184649482-172.17.0.2-1731457844458 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-13T00:31:36,885 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-13T00:31:36,886 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2fe1acb9{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-13T00:31:36,887 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1781cd99{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-13T00:31:36,887 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-13T00:31:36,887 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6778d1f2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-13T00:31:36,887 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@18d07610{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f7ff0268-c833-3a7c-dc93-16dfa0701c6a/hadoop.log.dir/,STOPPED} 2024-11-13T00:31:36,888 WARN [BP-1184649482-172.17.0.2-1731457844458 heartbeating to localhost/127.0.0.1:39533 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-13T00:31:36,888 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-13T00:31:36,889 WARN [BP-1184649482-172.17.0.2-1731457844458 heartbeating to localhost/127.0.0.1:39533 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1184649482-172.17.0.2-1731457844458 (Datanode Uuid d195dff4-3752-4c6a-8f06-df5619606dc9) service to localhost/127.0.0.1:39533 2024-11-13T00:31:36,889 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-13T00:31:36,889 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f7ff0268-c833-3a7c-dc93-16dfa0701c6a/cluster_cedab1ee-1ff8-0269-fdde-fcd88c748e68/data/data1/current/BP-1184649482-172.17.0.2-1731457844458 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-13T00:31:36,889 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f7ff0268-c833-3a7c-dc93-16dfa0701c6a/cluster_cedab1ee-1ff8-0269-fdde-fcd88c748e68/data/data2/current/BP-1184649482-172.17.0.2-1731457844458 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-13T00:31:36,889 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-13T00:31:36,895 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@683927ba{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-13T00:31:36,896 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@46ff36eb{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-13T00:31:36,896 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-13T00:31:36,896 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@391cb626{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-13T00:31:36,896 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6d795589{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f7ff0268-c833-3a7c-dc93-16dfa0701c6a/hadoop.log.dir/,STOPPED} 2024-11-13T00:31:36,902 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-13T00:31:36,919 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-13T00:31:36,928 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testCompactionRecordDoesntBlockRolling Thread=209 (was 182) Potentially hanging thread: nioEventLoopGroup-35-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (599498808) connection to localhost/127.0.0.1:39533 from jenkins.hfs.5 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-36-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:39533 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-36-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-34-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-12-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-16 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-36-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-34-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:39533 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:39533 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: regionserver/cb28580903fb:0.leaseChecker java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.regionserver.LeaseManager.run(LeaseManager.java:82) Potentially hanging thread: HMaster-EventLoopGroup-12-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-37-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) 
app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-37-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:39533 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-35-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-12-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-34-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (599498808) connection to localhost/127.0.0.1:39533 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-35-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:39533 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (599498808) connection to localhost/127.0.0.1:39533 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: LeaseRenewer:jenkins.hfs.5@localhost:39533 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-37-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) 
app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=483 (was 457) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=94 (was 198), ProcessCount=11 (was 11), AvailableMemoryMB=1243 (was 1292) 2024-11-13T00:31:36,935 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRolling Thread=209, OpenFileDescriptor=483, MaxFileDescriptor=1048576, SystemLoadAverage=94, ProcessCount=11, AvailableMemoryMB=1243 2024-11-13T00:31:36,936 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-13T00:31:36,936 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f7ff0268-c833-3a7c-dc93-16dfa0701c6a/hadoop.log.dir so I do NOT create it in target/test-data/ff82c900-794d-15f5-f48c-9e4d638c09e2 2024-11-13T00:31:36,936 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f7ff0268-c833-3a7c-dc93-16dfa0701c6a/hadoop.tmp.dir so I do NOT create it in target/test-data/ff82c900-794d-15f5-f48c-9e4d638c09e2 2024-11-13T00:31:36,936 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ff82c900-794d-15f5-f48c-9e4d638c09e2/cluster_d08ed4d9-c3e3-0405-2e05-46334e65a045, deleteOnExit=true 2024-11-13T00:31:36,936 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-13T00:31:36,936 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ff82c900-794d-15f5-f48c-9e4d638c09e2/test.cache.data in system properties and HBase conf 2024-11-13T00:31:36,936 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ff82c900-794d-15f5-f48c-9e4d638c09e2/hadoop.tmp.dir in system properties and HBase conf 2024-11-13T00:31:36,936 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ff82c900-794d-15f5-f48c-9e4d638c09e2/hadoop.log.dir in system properties and HBase conf 2024-11-13T00:31:36,936 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ff82c900-794d-15f5-f48c-9e4d638c09e2/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-13T00:31:36,936 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ff82c900-794d-15f5-f48c-9e4d638c09e2/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-13T00:31:36,936 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-13T00:31:36,937 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering 2024-11-13T00:31:36,937 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ff82c900-794d-15f5-f48c-9e4d638c09e2/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-13T00:31:36,937 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ff82c900-794d-15f5-f48c-9e4d638c09e2/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-13T00:31:36,937 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ff82c900-794d-15f5-f48c-9e4d638c09e2/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-13T00:31:36,937 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ff82c900-794d-15f5-f48c-9e4d638c09e2/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-13T00:31:36,937 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ff82c900-794d-15f5-f48c-9e4d638c09e2/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-13T00:31:36,937 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ff82c900-794d-15f5-f48c-9e4d638c09e2/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-13T00:31:36,937 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ff82c900-794d-15f5-f48c-9e4d638c09e2/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-13T00:31:36,937 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ff82c900-794d-15f5-f48c-9e4d638c09e2/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-13T00:31:36,937 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ff82c900-794d-15f5-f48c-9e4d638c09e2/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-13T00:31:36,937 INFO [Time-limited test {}] 
hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ff82c900-794d-15f5-f48c-9e4d638c09e2/nfs.dump.dir in system properties and HBase conf 2024-11-13T00:31:36,937 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ff82c900-794d-15f5-f48c-9e4d638c09e2/java.io.tmpdir in system properties and HBase conf 2024-11-13T00:31:36,937 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ff82c900-794d-15f5-f48c-9e4d638c09e2/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-13T00:31:36,937 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ff82c900-794d-15f5-f48c-9e4d638c09e2/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-13T00:31:36,937 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ff82c900-794d-15f5-f48c-9e4d638c09e2/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-13T00:31:36,950 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-13T00:31:36,983 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,41233,1731457759323/cb28580903fb%2C41233%2C1731457759323.meta.1731457760430.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:31:36,983 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,35015,1731457760593/cb28580903fb%2C35015%2C1731457760593.1731457760827 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:31:37,009 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-13T00:31:37,012 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-13T00:31:37,013 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-13T00:31:37,013 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-13T00:31:37,013 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-13T00:31:37,014 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-13T00:31:37,014 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3712ee8f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ff82c900-794d-15f5-f48c-9e4d638c09e2/hadoop.log.dir/,AVAILABLE} 2024-11-13T00:31:37,015 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5410bc8b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-13T00:31:37,128 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@32d04f87{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ff82c900-794d-15f5-f48c-9e4d638c09e2/java.io.tmpdir/jetty-localhost-44529-hadoop-hdfs-3_4_1-tests_jar-_-any-10701757366342848203/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-13T00:31:37,128 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@22c7a053{HTTP/1.1, (http/1.1)}{localhost:44529} 2024-11-13T00:31:37,129 INFO [Time-limited test {}] server.Server(415): Started @242977ms 2024-11-13T00:31:37,141 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-13T00:31:37,237 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-13T00:31:37,243 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-13T00:31:37,244 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-13T00:31:37,244 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-13T00:31:37,244 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-13T00:31:37,245 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@529d096e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ff82c900-794d-15f5-f48c-9e4d638c09e2/hadoop.log.dir/,AVAILABLE} 2024-11-13T00:31:37,245 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@42326ea1{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-13T00:31:37,370 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4ebf55d8{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ff82c900-794d-15f5-f48c-9e4d638c09e2/java.io.tmpdir/jetty-localhost-36001-hadoop-hdfs-3_4_1-tests_jar-_-any-13787363677009290346/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-13T00:31:37,371 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@10460741{HTTP/1.1, (http/1.1)}{localhost:36001} 2024-11-13T00:31:37,371 INFO [Time-limited test {}] server.Server(415): Started @243219ms 2024-11-13T00:31:37,372 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-13T00:31:37,400 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-13T00:31:37,403 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-13T00:31:37,404 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-13T00:31:37,404 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-13T00:31:37,404 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-13T00:31:37,405 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@602a7b70{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ff82c900-794d-15f5-f48c-9e4d638c09e2/hadoop.log.dir/,AVAILABLE} 2024-11-13T00:31:37,405 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7c84a90f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-13T00:31:37,470 WARN [Thread-1974 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ff82c900-794d-15f5-f48c-9e4d638c09e2/cluster_d08ed4d9-c3e3-0405-2e05-46334e65a045/data/data1/current/BP-487134543-172.17.0.2-1731457896957/current, will proceed with Du for space computation calculation, 2024-11-13T00:31:37,470 WARN [Thread-1975 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ff82c900-794d-15f5-f48c-9e4d638c09e2/cluster_d08ed4d9-c3e3-0405-2e05-46334e65a045/data/data2/current/BP-487134543-172.17.0.2-1731457896957/current, will proceed with Du for space computation calculation, 2024-11-13T00:31:37,486 WARN [Thread-1953 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-13T00:31:37,488 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xc236d51fd7b3e3b5 with lease ID 0xbd2733cbe5f0ac5c: Processing first storage report for DS-3c571f03-a5e1-4650-b411-ed7d0ea14fe7 from datanode DatanodeRegistration(127.0.0.1:43865, datanodeUuid=5b2e41f1-2960-4af4-a5e7-9d9852fd9250, infoPort=37755, infoSecurePort=0, ipcPort=35139, storageInfo=lv=-57;cid=testClusterID;nsid=354687523;c=1731457896957) 2024-11-13T00:31:37,488 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc236d51fd7b3e3b5 with lease ID 0xbd2733cbe5f0ac5c: from storage DS-3c571f03-a5e1-4650-b411-ed7d0ea14fe7 node DatanodeRegistration(127.0.0.1:43865, datanodeUuid=5b2e41f1-2960-4af4-a5e7-9d9852fd9250, infoPort=37755, infoSecurePort=0, ipcPort=35139, storageInfo=lv=-57;cid=testClusterID;nsid=354687523;c=1731457896957), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-13T00:31:37,488 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xc236d51fd7b3e3b5 with lease ID 0xbd2733cbe5f0ac5c: Processing first storage report for DS-2b9792dd-7816-470a-a3fb-c87b1e805ce7 from datanode DatanodeRegistration(127.0.0.1:43865, datanodeUuid=5b2e41f1-2960-4af4-a5e7-9d9852fd9250, infoPort=37755, infoSecurePort=0, ipcPort=35139, storageInfo=lv=-57;cid=testClusterID;nsid=354687523;c=1731457896957) 2024-11-13T00:31:37,488 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc236d51fd7b3e3b5 with lease ID 0xbd2733cbe5f0ac5c: from storage DS-2b9792dd-7816-470a-a3fb-c87b1e805ce7 node DatanodeRegistration(127.0.0.1:43865, datanodeUuid=5b2e41f1-2960-4af4-a5e7-9d9852fd9250, infoPort=37755, infoSecurePort=0, ipcPort=35139, storageInfo=lv=-57;cid=testClusterID;nsid=354687523;c=1731457896957), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-13T00:31:37,520 INFO [regionserver/cb28580903fb:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-13T00:31:37,527 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@6d183f58{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ff82c900-794d-15f5-f48c-9e4d638c09e2/java.io.tmpdir/jetty-localhost-33793-hadoop-hdfs-3_4_1-tests_jar-_-any-7517300203710160506/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-13T00:31:37,527 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@72db48b3{HTTP/1.1, (http/1.1)}{localhost:33793} 2024-11-13T00:31:37,527 INFO [Time-limited test {}] server.Server(415): Started @243375ms 2024-11-13T00:31:37,528 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-11-13T00:31:37,629 WARN [Thread-2000 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ff82c900-794d-15f5-f48c-9e4d638c09e2/cluster_d08ed4d9-c3e3-0405-2e05-46334e65a045/data/data3/current/BP-487134543-172.17.0.2-1731457896957/current, will proceed with Du for space computation calculation, 2024-11-13T00:31:37,629 WARN [Thread-2001 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ff82c900-794d-15f5-f48c-9e4d638c09e2/cluster_d08ed4d9-c3e3-0405-2e05-46334e65a045/data/data4/current/BP-487134543-172.17.0.2-1731457896957/current, will proceed with Du for space computation calculation, 2024-11-13T00:31:37,644 WARN [Thread-1989 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-13T00:31:37,646 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x6887aa868c5b5620 with lease ID 0xbd2733cbe5f0ac5d: Processing first storage report for DS-eeb915db-ccf2-411c-96fc-970c36fbfba3 from datanode DatanodeRegistration(127.0.0.1:46003, datanodeUuid=58564819-483e-4563-98b6-090c7bbe77ce, infoPort=38763, infoSecurePort=0, ipcPort=35311, storageInfo=lv=-57;cid=testClusterID;nsid=354687523;c=1731457896957) 2024-11-13T00:31:37,646 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x6887aa868c5b5620 with lease ID 0xbd2733cbe5f0ac5d: from storage DS-eeb915db-ccf2-411c-96fc-970c36fbfba3 node DatanodeRegistration(127.0.0.1:46003, datanodeUuid=58564819-483e-4563-98b6-090c7bbe77ce, infoPort=38763, infoSecurePort=0, ipcPort=35311, storageInfo=lv=-57;cid=testClusterID;nsid=354687523;c=1731457896957), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-13T00:31:37,646 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x6887aa868c5b5620 with lease ID 0xbd2733cbe5f0ac5d: Processing first storage report for DS-472034b4-625f-454d-ba95-bb60ff570747 from datanode DatanodeRegistration(127.0.0.1:46003, datanodeUuid=58564819-483e-4563-98b6-090c7bbe77ce, infoPort=38763, infoSecurePort=0, ipcPort=35311, storageInfo=lv=-57;cid=testClusterID;nsid=354687523;c=1731457896957) 2024-11-13T00:31:37,646 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x6887aa868c5b5620 with lease ID 0xbd2733cbe5f0ac5d: from storage DS-472034b4-625f-454d-ba95-bb60ff570747 node DatanodeRegistration(127.0.0.1:46003, datanodeUuid=58564819-483e-4563-98b6-090c7bbe77ce, infoPort=38763, infoSecurePort=0, ipcPort=35311, storageInfo=lv=-57;cid=testClusterID;nsid=354687523;c=1731457896957), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-13T00:31:37,650 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ff82c900-794d-15f5-f48c-9e4d638c09e2 2024-11-13T00:31:37,653 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ff82c900-794d-15f5-f48c-9e4d638c09e2/cluster_d08ed4d9-c3e3-0405-2e05-46334e65a045/zookeeper_0, clientPort=63834, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ff82c900-794d-15f5-f48c-9e4d638c09e2/cluster_d08ed4d9-c3e3-0405-2e05-46334e65a045/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ff82c900-794d-15f5-f48c-9e4d638c09e2/cluster_d08ed4d9-c3e3-0405-2e05-46334e65a045/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-13T00:31:37,654 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=63834 2024-11-13T00:31:37,654 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T00:31:37,655 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T00:31:37,662 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43865 is added to blk_1073741825_1001 (size=7) 2024-11-13T00:31:37,663 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46003 is added to blk_1073741825_1001 (size=7) 2024-11-13T00:31:37,664 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132 with version=8 2024-11-13T00:31:37,664 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:36657/user/jenkins/test-data/90fad67b-44a6-07b6-9c5c-bf568afb6a9a/hbase-staging 2024-11-13T00:31:37,666 INFO [Time-limited test {}] client.ConnectionUtils(128): master/cb28580903fb:0 server-side Connection retries=45 2024-11-13T00:31:37,666 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-13T00:31:37,666 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-13T00:31:37,666 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-13T00:31:37,666 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-13T00:31:37,666 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-13T00:31:37,666 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-13T00:31:37,666 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-13T00:31:37,667 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:39625 2024-11-13T00:31:37,668 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:39625 connecting to ZooKeeper ensemble=127.0.0.1:63834 2024-11-13T00:31:37,673 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:396250x0, quorum=127.0.0.1:63834, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-13T00:31:37,674 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:39625-0x1010ebcbf430000 connected 2024-11-13T00:31:37,688 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T00:31:37,689 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T00:31:37,691 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:39625-0x1010ebcbf430000, quorum=127.0.0.1:63834, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-13T00:31:37,691 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132, hbase.cluster.distributed=false 2024-11-13T00:31:37,693 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:39625-0x1010ebcbf430000, quorum=127.0.0.1:63834, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-13T00:31:37,694 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=39625 2024-11-13T00:31:37,694 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=39625 2024-11-13T00:31:37,697 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=39625 2024-11-13T00:31:37,698 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=39625 2024-11-13T00:31:37,698 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=39625 2024-11-13T00:31:37,713 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/cb28580903fb:0 server-side Connection retries=45 2024-11-13T00:31:37,713 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-13T00:31:37,713 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-13T00:31:37,713 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-13T00:31:37,713 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-13T00:31:37,713 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-13T00:31:37,713 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-13T00:31:37,713 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-13T00:31:37,713 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:35989 2024-11-13T00:31:37,715 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:35989 connecting to ZooKeeper ensemble=127.0.0.1:63834 2024-11-13T00:31:37,715 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T00:31:37,717 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T00:31:37,718 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/MasterData/WALs/cb28580903fb,32917,1731457759225/cb28580903fb%2C32917%2C1731457759225.1731457759520
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-13T00:31:37,720 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:359890x0, quorum=127.0.0.1:63834, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-13T00:31:37,721 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:359890x0, quorum=127.0.0.1:63834, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-13T00:31:37,721 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:35989-0x1010ebcbf430001 connected 2024-11-13T00:31:37,721 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-13T00:31:37,721 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-13T00:31:37,722 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:35989-0x1010ebcbf430001, quorum=127.0.0.1:63834, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-13T00:31:37,723 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:35989-0x1010ebcbf430001, quorum=127.0.0.1:63834, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-13T00:31:37,723 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=35989 2024-11-13T00:31:37,723 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=35989 2024-11-13T00:31:37,723 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=35989 2024-11-13T00:31:37,724 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=35989 2024-11-13T00:31:37,724 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=35989 2024-11-13T00:31:37,735 DEBUG [M:0;cb28580903fb:39625 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;cb28580903fb:39625 2024-11-13T00:31:37,735 INFO [master/cb28580903fb:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/cb28580903fb,39625,1731457897665 2024-11-13T00:31:37,737 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39625-0x1010ebcbf430000, quorum=127.0.0.1:63834, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-13T00:31:37,737 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35989-0x1010ebcbf430001, quorum=127.0.0.1:63834, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-13T00:31:37,737 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:39625-0x1010ebcbf430000, quorum=127.0.0.1:63834, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/cb28580903fb,39625,1731457897665 2024-11-13T00:31:37,740 DEBUG [Time-limited
test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35989-0x1010ebcbf430001, quorum=127.0.0.1:63834, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-13T00:31:37,740 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39625-0x1010ebcbf430000, quorum=127.0.0.1:63834, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T00:31:37,740 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35989-0x1010ebcbf430001, quorum=127.0.0.1:63834, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T00:31:37,740 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:39625-0x1010ebcbf430000, quorum=127.0.0.1:63834, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-13T00:31:37,741 INFO [master/cb28580903fb:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/cb28580903fb,39625,1731457897665 from backup master directory 2024-11-13T00:31:37,742 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39625-0x1010ebcbf430000, quorum=127.0.0.1:63834, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/cb28580903fb,39625,1731457897665 2024-11-13T00:31:37,742 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35989-0x1010ebcbf430001, quorum=127.0.0.1:63834, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-13T00:31:37,742 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39625-0x1010ebcbf430000, quorum=127.0.0.1:63834, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-13T00:31:37,742 WARN [master/cb28580903fb:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
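
[Editor's note] The master above registers itself in ZooKeeper at ensemble 127.0.0.1:63834 under baseZNode /hbase. A minimal client-side sketch of pointing a Configuration at that ensemble and asking which master is active, assuming only the standard hbase-client API; the class name and main() wrapper are illustrative.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class ConnectToTestCluster {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    conf.set("hbase.zookeeper.quorum", "127.0.0.1");          // ensemble host from the log
    conf.set("hbase.zookeeper.property.clientPort", "63834"); // ensemble port from the log
    conf.set("zookeeper.znode.parent", "/hbase");             // baseZNode from the log
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // The active master registered above should be reported here.
      System.out.println("Active master: " + admin.getClusterMetrics().getMasterName());
    }
  }
}
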
2024-11-13T00:31:37,742 INFO [master/cb28580903fb:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=cb28580903fb,39625,1731457897665 2024-11-13T00:31:37,746 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/hbase.id] with ID: fc359955-a732-4ca7-b2bc-7606d5e1d8c6 2024-11-13T00:31:37,746 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/.tmp/hbase.id 2024-11-13T00:31:37,751 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46003 is added to blk_1073741826_1002 (size=42) 2024-11-13T00:31:37,752 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43865 is added to blk_1073741826_1002 (size=42) 2024-11-13T00:31:37,752 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/.tmp/hbase.id]:[hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/hbase.id] 2024-11-13T00:31:37,761 INFO [master/cb28580903fb:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T00:31:37,761 INFO [master/cb28580903fb:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-13T00:31:37,762 INFO [master/cb28580903fb:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
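
[Editor's note] The cluster ID bootstrap above follows a write-to-temp-then-rename pattern: write .tmp/hbase.id, then move it to hbase.id. A rough sketch of that pattern against the plain Hadoop FileSystem API; the root directory is illustrative, and HBase's FSUtils performs the real steps.

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ClusterIdWriteSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:44083"), conf);
    Path rootDir = new Path("/user/jenkins/test-data/example-root"); // illustrative root dir
    Path tmp = new Path(rootDir, ".tmp/hbase.id");
    Path target = new Path(rootDir, "hbase.id");
    try (FSDataOutputStream out = fs.create(tmp, true)) {
      out.writeBytes("fc359955-a732-4ca7-b2bc-7606d5e1d8c6"); // cluster ID from the log
    }
    // Rename within one HDFS namespace is atomic, so readers never observe a
    // partially written hbase.id.
    if (!fs.rename(tmp, target)) {
      throw new java.io.IOException("rename failed: " + tmp + " -> " + target);
    }
  }
}
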
2024-11-13T00:31:37,763 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35989-0x1010ebcbf430001, quorum=127.0.0.1:63834, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T00:31:37,763 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39625-0x1010ebcbf430000, quorum=127.0.0.1:63834, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T00:31:37,768 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43865 is added to blk_1073741827_1003 (size=196) 2024-11-13T00:31:37,769 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46003 is added to blk_1073741827_1003 (size=196) 2024-11-13T00:31:37,769 INFO [master/cb28580903fb:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-13T00:31:37,770 INFO [master/cb28580903fb:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-13T00:31:37,770 INFO [master/cb28580903fb:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-13T00:31:37,775 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46003 is added to blk_1073741828_1004 (size=1189) 2024-11-13T00:31:37,776 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43865 is added to blk_1073741828_1004 (size=1189) 2024-11-13T00:31:37,776 INFO [master/cb28580903fb:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/MasterData/data/master/store 2024-11-13T00:31:37,781 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43865 is added to blk_1073741829_1005 (size=34) 2024-11-13T00:31:37,782 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46003 is added to blk_1073741829_1005 (size=34) 2024-11-13T00:31:37,782 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T00:31:37,782 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-13T00:31:37,782 INFO [master/cb28580903fb:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-13T00:31:37,782 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-13T00:31:37,782 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-13T00:31:37,782 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-13T00:31:37,782 INFO [master/cb28580903fb:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
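
[Editor's note] The 'master:store' descriptor logged above is built internally by the master, but the same column-family settings (VERSIONS, IN_MEMORY, BLOOMFILTER, DATA_BLOCK_ENCODING, BLOCKSIZE) can be expressed with the public builder API. A sketch using an illustrative table name and only the 'info' and 'proc' families; 'rs' and 'state' follow the same shape.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class StoreDescriptorSketch {
  public static TableDescriptor build() {
    ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("info"))
        .setMaxVersions(3)                                     // VERSIONS => '3'
        .setInMemory(true)                                     // IN_MEMORY => 'true'
        .setBloomFilterType(BloomType.ROWCOL)                  // BLOOMFILTER => 'ROWCOL'
        .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)  // DATA_BLOCK_ENCODING
        .setBlocksize(8192)                                    // BLOCKSIZE => 8 KB
        .build();
    ColumnFamilyDescriptor proc = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("proc"))
        .setMaxVersions(1)
        .setBloomFilterType(BloomType.ROW)
        .setBlocksize(65536)
        .build();
    return TableDescriptorBuilder
        .newBuilder(TableName.valueOf("example", "store"))     // illustrative name only
        .setColumnFamily(info)
        .setColumnFamily(proc)
        .build();
  }
}
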
2024-11-13T00:31:37,782 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731457897782Disabling compacts and flushes for region at 1731457897782Disabling writes for close at 1731457897782Writing region close event to WAL at 1731457897782Closed at 1731457897782 2024-11-13T00:31:37,783 WARN [master/cb28580903fb:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/MasterData/data/master/store/.initializing 2024-11-13T00:31:37,783 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/MasterData/WALs/cb28580903fb,39625,1731457897665 2024-11-13T00:31:37,786 INFO [master/cb28580903fb:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=cb28580903fb%2C39625%2C1731457897665, suffix=, logDir=hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/MasterData/WALs/cb28580903fb,39625,1731457897665, archiveDir=hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/MasterData/oldWALs, maxLogs=10 2024-11-13T00:31:37,786 INFO [master/cb28580903fb:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor cb28580903fb%2C39625%2C1731457897665.1731457897786 2024-11-13T00:31:37,790 INFO [master/cb28580903fb:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/MasterData/WALs/cb28580903fb,39625,1731457897665/cb28580903fb%2C39625%2C1731457897665.1731457897786 2024-11-13T00:31:37,790 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38763:38763),(127.0.0.1/127.0.0.1:37755:37755)] 2024-11-13T00:31:37,791 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-13T00:31:37,791 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T00:31:37,791 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-13T00:31:37,791 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-13T00:31:37,796 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-13T00:31:37,797 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-13T00:31:37,797 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T00:31:37,797 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T00:31:37,798 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-13T00:31:37,798 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-13T00:31:37,798 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T00:31:37,799 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T00:31:37,799 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-13T00:31:37,800 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-13T00:31:37,800 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T00:31:37,800 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T00:31:37,800 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-13T00:31:37,801 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-13T00:31:37,801 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T00:31:37,802 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T00:31:37,802 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-13T00:31:37,802 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-13T00:31:37,803 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-13T00:31:37,804 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-13T00:31:37,804 DEBUG [master/cb28580903fb:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-13T00:31:37,804 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-13T00:31:37,805 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-13T00:31:37,806 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-13T00:31:37,807 INFO [master/cb28580903fb:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=850030, jitterRate=0.08086967468261719}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-13T00:31:37,807 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731457897791Initializing all the Stores at 1731457897792 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731457897792Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731457897796 (+4 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731457897796Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731457897796Cleaning up temporary data from old regions at 1731457897804 (+8 ms)Region opened successfully at 1731457897807 (+3 ms) 2024-11-13T00:31:37,808 INFO [master/cb28580903fb:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-13T00:31:37,810 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@749caa41, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=cb28580903fb/172.17.0.2:0 2024-11-13T00:31:37,811 INFO [master/cb28580903fb:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-13T00:31:37,811 INFO [master/cb28580903fb:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-13T00:31:37,811 INFO [master/cb28580903fb:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-13T00:31:37,811 INFO [master/cb28580903fb:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-13T00:31:37,811 INFO [master/cb28580903fb:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-13T00:31:37,812 INFO [master/cb28580903fb:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-13T00:31:37,812 INFO [master/cb28580903fb:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-13T00:31:37,813 INFO [master/cb28580903fb:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-13T00:31:37,814 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39625-0x1010ebcbf430000, quorum=127.0.0.1:63834, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-13T00:31:37,815 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-13T00:31:37,815 INFO [master/cb28580903fb:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-13T00:31:37,816 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39625-0x1010ebcbf430000, quorum=127.0.0.1:63834, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-13T00:31:37,818 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-13T00:31:37,818 INFO [master/cb28580903fb:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-13T00:31:37,819 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39625-0x1010ebcbf430000, quorum=127.0.0.1:63834, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-13T00:31:37,820 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-13T00:31:37,821 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39625-0x1010ebcbf430000, quorum=127.0.0.1:63834, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-13T00:31:37,822 DEBUG 
[master/cb28580903fb:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-13T00:31:37,823 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39625-0x1010ebcbf430000, quorum=127.0.0.1:63834, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-13T00:31:37,824 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-13T00:31:37,827 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39625-0x1010ebcbf430000, quorum=127.0.0.1:63834, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-13T00:31:37,827 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35989-0x1010ebcbf430001, quorum=127.0.0.1:63834, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-13T00:31:37,827 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39625-0x1010ebcbf430000, quorum=127.0.0.1:63834, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T00:31:37,827 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35989-0x1010ebcbf430001, quorum=127.0.0.1:63834, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T00:31:37,827 INFO [master/cb28580903fb:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=cb28580903fb,39625,1731457897665, sessionid=0x1010ebcbf430000, setting cluster-up flag (Was=false) 2024-11-13T00:31:37,829 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39625-0x1010ebcbf430000, quorum=127.0.0.1:63834, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T00:31:37,829 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35989-0x1010ebcbf430001, quorum=127.0.0.1:63834, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T00:31:37,834 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-13T00:31:37,835 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=cb28580903fb,39625,1731457897665 2024-11-13T00:31:37,838 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39625-0x1010ebcbf430000, quorum=127.0.0.1:63834, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T00:31:37,838 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35989-0x1010ebcbf430001, quorum=127.0.0.1:63834, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T00:31:37,843 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-13T00:31:37,844 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=cb28580903fb,39625,1731457897665 2024-11-13T00:31:37,845 INFO [master/cb28580903fb:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-13T00:31:37,846 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-13T00:31:37,846 INFO [master/cb28580903fb:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-13T00:31:37,846 INFO [master/cb28580903fb:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-13T00:31:37,846 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: cb28580903fb,39625,1731457897665 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-13T00:31:37,847 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/cb28580903fb:0, corePoolSize=5, maxPoolSize=5 2024-11-13T00:31:37,847 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/cb28580903fb:0, corePoolSize=5, maxPoolSize=5 2024-11-13T00:31:37,847 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/cb28580903fb:0, corePoolSize=5, maxPoolSize=5 2024-11-13T00:31:37,848 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/cb28580903fb:0, corePoolSize=5, maxPoolSize=5 2024-11-13T00:31:37,848 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/cb28580903fb:0, corePoolSize=10, maxPoolSize=10 2024-11-13T00:31:37,848 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/cb28580903fb:0, corePoolSize=1, maxPoolSize=1 2024-11-13T00:31:37,848 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/cb28580903fb:0, corePoolSize=2, maxPoolSize=2 2024-11-13T00:31:37,848 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/cb28580903fb:0, corePoolSize=1, 
maxPoolSize=1 2024-11-13T00:31:37,849 INFO [master/cb28580903fb:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731457927849 2024-11-13T00:31:37,849 INFO [master/cb28580903fb:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-13T00:31:37,849 INFO [master/cb28580903fb:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-13T00:31:37,849 INFO [master/cb28580903fb:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-13T00:31:37,849 INFO [master/cb28580903fb:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-13T00:31:37,849 INFO [master/cb28580903fb:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-13T00:31:37,849 INFO [master/cb28580903fb:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-13T00:31:37,849 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-13T00:31:37,849 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-13T00:31:37,850 INFO [master/cb28580903fb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
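
[Editor's note] The LogsCleaner/HFileCleaner entries above are ScheduledChore instances driven by a ChoreService on a fixed period (600000 ms here). A minimal sketch of that pattern, assuming the public ScheduledChore/ChoreService constructors; the chore name, Stoppable stub, and chore body are placeholders, not the real LogCleaner.

import org.apache.hadoop.hbase.ChoreService;
import org.apache.hadoop.hbase.ScheduledChore;
import org.apache.hadoop.hbase.Stoppable;

public class ChoreSketch {
  // Minimal Stoppable so a chore can be constructed outside a real server.
  static final class SimpleStopper implements Stoppable {
    private volatile boolean stopped;
    @Override public void stop(String why) { stopped = true; }
    @Override public boolean isStopped() { return stopped; }
  }

  public static void main(String[] args) {
    Stoppable stopper = new SimpleStopper();
    ScheduledChore chore = new ScheduledChore("ExampleCleaner", stopper, 600_000) {
      @Override protected void chore() {
        // Periodic cleanup work would run here every 10 minutes.
      }
    };
    ChoreService service = new ChoreService("example");
    service.scheduleChore(chore);
    service.shutdown(); // stop the pool once the sketch is done
  }
}
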
2024-11-13T00:31:37,850 INFO [master/cb28580903fb:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-13T00:31:37,850 INFO [master/cb28580903fb:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-13T00:31:37,850 INFO [master/cb28580903fb:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-13T00:31:37,850 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T00:31:37,850 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-13T00:31:37,852 INFO [master/cb28580903fb:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-13T00:31:37,852 INFO [master/cb28580903fb:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-13T00:31:37,852 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/cb28580903fb:0:becomeActiveMaster-HFileCleaner.large.0-1731457897852,5,FailOnTimeoutGroup] 2024-11-13T00:31:37,853 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/cb28580903fb:0:becomeActiveMaster-HFileCleaner.small.0-1731457897852,5,FailOnTimeoutGroup] 2024-11-13T00:31:37,853 INFO [master/cb28580903fb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-13T00:31:37,853 INFO [master/cb28580903fb:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. 
Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-13T00:31:37,853 INFO [master/cb28580903fb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-13T00:31:37,853 INFO [master/cb28580903fb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-13T00:31:37,857 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46003 is added to blk_1073741831_1007 (size=1321) 2024-11-13T00:31:37,858 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43865 is added to blk_1073741831_1007 (size=1321) 2024-11-13T00:31:37,858 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-13T00:31:37,859 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132 2024-11-13T00:31:37,867 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46003 is added to blk_1073741832_1008 (size=32) 2024-11-13T00:31:37,867 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43865 is added to blk_1073741832_1008 (size=32) 2024-11-13T00:31:37,868 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T00:31:37,869 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created 
cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-13T00:31:37,870 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-13T00:31:37,870 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T00:31:37,870 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T00:31:37,870 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-13T00:31:37,871 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-13T00:31:37,871 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T00:31:37,872 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T00:31:37,872 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-13T00:31:37,873 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, 
maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-13T00:31:37,873 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T00:31:37,873 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T00:31:37,873 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-13T00:31:37,874 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-13T00:31:37,874 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T00:31:37,874 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T00:31:37,874 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-13T00:31:37,875 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/hbase/meta/1588230740 2024-11-13T00:31:37,875 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/hbase/meta/1588230740 2024-11-13T00:31:37,876 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-13T00:31:37,876 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-13T00:31:37,877 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using 
region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-13T00:31:37,877 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-13T00:31:37,879 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-13T00:31:37,879 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=776495, jitterRate=-0.012636318802833557}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-13T00:31:37,880 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731457897868Initializing all the Stores at 1731457897868Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731457897868Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731457897869 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731457897869Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731457897869Cleaning up temporary data from old regions at 1731457897876 (+7 ms)Region opened successfully at 1731457897880 (+4 ms) 2024-11-13T00:31:37,880 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-13T00:31:37,880 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-13T00:31:37,880 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-13T00:31:37,880 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-13T00:31:37,880 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-13T00:31:37,880 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-13T00:31:37,880 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731457897880Disabling compacts and flushes for region at 
1731457897880Disabling writes for close at 1731457897880Writing region close event to WAL at 1731457897880Closed at 1731457897880 2024-11-13T00:31:37,882 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-13T00:31:37,882 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-13T00:31:37,882 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-13T00:31:37,883 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-13T00:31:37,884 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-13T00:31:37,925 INFO [RS:0;cb28580903fb:35989 {}] regionserver.HRegionServer(746): ClusterId : fc359955-a732-4ca7-b2bc-7606d5e1d8c6 2024-11-13T00:31:37,926 DEBUG [RS:0;cb28580903fb:35989 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-13T00:31:37,928 DEBUG [RS:0;cb28580903fb:35989 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-13T00:31:37,928 DEBUG [RS:0;cb28580903fb:35989 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-13T00:31:37,930 DEBUG [RS:0;cb28580903fb:35989 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-13T00:31:37,930 DEBUG [RS:0;cb28580903fb:35989 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3d3023a6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=cb28580903fb/172.17.0.2:0 2024-11-13T00:31:37,942 DEBUG [RS:0;cb28580903fb:35989 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;cb28580903fb:35989 2024-11-13T00:31:37,942 INFO [RS:0;cb28580903fb:35989 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-13T00:31:37,942 INFO [RS:0;cb28580903fb:35989 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-13T00:31:37,942 DEBUG [RS:0;cb28580903fb:35989 {}] regionserver.HRegionServer(832): About to register with Master. 
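
[Editor's note] The FlushLargeStoresPolicy lines above report that hbase.hregion.percolumnfamilyflush.size.lower.bound is not set on hbase:meta, so the per-family lower bound is derived as the region's memstore flush heap size divided by the number of column families, and the resulting policy is logged as FlushLargeStoresPolicy{flushSizeLowerBound=16777216}. A minimal sketch of that arithmetic, assuming the 64 MB figure implied by the logged values (four families x 16 MB); the class and variable names are illustrative, not HBase's:

    final class FlushBoundSketch {
      static long perFamilyLowerBound() {
        long memStoreFlushHeapSize = 64L * 1024 * 1024; // implied by the log: 4 families * 16 MB each
        int familyCount = 4;                            // hbase:meta families: info, ns, rep_barrier, table
        return memStoreFlushHeapSize / familyCount;     // 16777216 bytes, i.e. the logged "16.0 M"
      }
    }
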
2024-11-13T00:31:37,943 INFO [RS:0;cb28580903fb:35989 {}] regionserver.HRegionServer(2659): reportForDuty to master=cb28580903fb,39625,1731457897665 with port=35989, startcode=1731457897712 2024-11-13T00:31:37,943 DEBUG [RS:0;cb28580903fb:35989 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-13T00:31:37,945 INFO [HMaster-EventLoopGroup-14-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55657, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.6 (auth:SIMPLE), service=RegionServerStatusService 2024-11-13T00:31:37,946 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39625 {}] master.ServerManager(363): Checking decommissioned status of RegionServer cb28580903fb,35989,1731457897712 2024-11-13T00:31:37,946 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39625 {}] master.ServerManager(517): Registering regionserver=cb28580903fb,35989,1731457897712 2024-11-13T00:31:37,947 DEBUG [RS:0;cb28580903fb:35989 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132 2024-11-13T00:31:37,947 DEBUG [RS:0;cb28580903fb:35989 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:44083 2024-11-13T00:31:37,947 DEBUG [RS:0;cb28580903fb:35989 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-13T00:31:37,949 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39625-0x1010ebcbf430000, quorum=127.0.0.1:63834, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-13T00:31:37,949 DEBUG [RS:0;cb28580903fb:35989 {}] zookeeper.ZKUtil(111): regionserver:35989-0x1010ebcbf430001, quorum=127.0.0.1:63834, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/cb28580903fb,35989,1731457897712 2024-11-13T00:31:37,949 WARN [RS:0;cb28580903fb:35989 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-13T00:31:37,949 INFO [RS:0;cb28580903fb:35989 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-13T00:31:37,950 DEBUG [RS:0;cb28580903fb:35989 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/WALs/cb28580903fb,35989,1731457897712 2024-11-13T00:31:37,950 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [cb28580903fb,35989,1731457897712] 2024-11-13T00:31:37,953 INFO [RS:0;cb28580903fb:35989 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-13T00:31:37,954 INFO [RS:0;cb28580903fb:35989 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-13T00:31:37,954 INFO [RS:0;cb28580903fb:35989 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-13T00:31:37,954 INFO [RS:0;cb28580903fb:35989 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-11-13T00:31:37,954 INFO [RS:0;cb28580903fb:35989 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-13T00:31:37,955 INFO [RS:0;cb28580903fb:35989 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-13T00:31:37,955 INFO [RS:0;cb28580903fb:35989 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-13T00:31:37,956 DEBUG [RS:0;cb28580903fb:35989 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/cb28580903fb:0, corePoolSize=1, maxPoolSize=1 2024-11-13T00:31:37,956 DEBUG [RS:0;cb28580903fb:35989 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/cb28580903fb:0, corePoolSize=1, maxPoolSize=1 2024-11-13T00:31:37,956 DEBUG [RS:0;cb28580903fb:35989 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/cb28580903fb:0, corePoolSize=1, maxPoolSize=1 2024-11-13T00:31:37,956 DEBUG [RS:0;cb28580903fb:35989 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/cb28580903fb:0, corePoolSize=1, maxPoolSize=1 2024-11-13T00:31:37,956 DEBUG [RS:0;cb28580903fb:35989 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/cb28580903fb:0, corePoolSize=1, maxPoolSize=1 2024-11-13T00:31:37,956 DEBUG [RS:0;cb28580903fb:35989 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/cb28580903fb:0, corePoolSize=2, maxPoolSize=2 2024-11-13T00:31:37,956 DEBUG [RS:0;cb28580903fb:35989 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/cb28580903fb:0, corePoolSize=1, maxPoolSize=1 2024-11-13T00:31:37,956 DEBUG [RS:0;cb28580903fb:35989 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/cb28580903fb:0, corePoolSize=1, maxPoolSize=1 2024-11-13T00:31:37,956 DEBUG [RS:0;cb28580903fb:35989 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/cb28580903fb:0, corePoolSize=1, maxPoolSize=1 2024-11-13T00:31:37,956 DEBUG [RS:0;cb28580903fb:35989 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/cb28580903fb:0, corePoolSize=1, maxPoolSize=1 2024-11-13T00:31:37,956 DEBUG [RS:0;cb28580903fb:35989 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/cb28580903fb:0, corePoolSize=1, maxPoolSize=1 2024-11-13T00:31:37,956 DEBUG [RS:0;cb28580903fb:35989 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/cb28580903fb:0, corePoolSize=1, maxPoolSize=1 2024-11-13T00:31:37,956 DEBUG [RS:0;cb28580903fb:35989 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/cb28580903fb:0, corePoolSize=3, maxPoolSize=3 2024-11-13T00:31:37,956 DEBUG [RS:0;cb28580903fb:35989 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/cb28580903fb:0, corePoolSize=3, maxPoolSize=3 2024-11-13T00:31:37,957 INFO [RS:0;cb28580903fb:35989 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
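
[Editor's note] The repeated "Chore ScheduledChore name=..., period=..., unit=MILLISECONDS is enabled." lines above come from the region server registering periodic tasks with its ChoreService. A minimal sketch of that pattern, assuming the ScheduledChore(name, stopper, periodMillis) constructor and ChoreService.scheduleChore(...) present in this HBase line; the chore name and period below are made up for illustration:

    import org.apache.hadoop.hbase.ChoreService;
    import org.apache.hadoop.hbase.ScheduledChore;
    import org.apache.hadoop.hbase.Stoppable;

    final class ExampleStatusChore extends ScheduledChore {
      ExampleStatusChore(Stoppable stopper) {
        super("ExampleStatusChore", stopper, 60_000); // name, stopper, period in milliseconds
      }
      @Override
      protected void chore() {
        // periodic work goes here; the real chores above compute metrics, check compactions, clean files, etc.
      }
    }

    // Usage (hypothetical): new ChoreService("example-prefix").scheduleChore(new ExampleStatusChore(stopper));
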
2024-11-13T00:31:37,957 INFO [RS:0;cb28580903fb:35989 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-13T00:31:37,957 INFO [RS:0;cb28580903fb:35989 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-13T00:31:37,957 INFO [RS:0;cb28580903fb:35989 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-13T00:31:37,957 INFO [RS:0;cb28580903fb:35989 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-13T00:31:37,957 INFO [RS:0;cb28580903fb:35989 {}] hbase.ChoreService(168): Chore ScheduledChore name=cb28580903fb,35989,1731457897712-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-13T00:31:37,972 INFO [RS:0;cb28580903fb:35989 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-13T00:31:37,972 INFO [RS:0;cb28580903fb:35989 {}] hbase.ChoreService(168): Chore ScheduledChore name=cb28580903fb,35989,1731457897712-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-13T00:31:37,972 INFO [RS:0;cb28580903fb:35989 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-13T00:31:37,972 INFO [RS:0;cb28580903fb:35989 {}] regionserver.Replication(171): cb28580903fb,35989,1731457897712 started 2024-11-13T00:31:37,983 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,35015,1731457760593/cb28580903fb%2C35015%2C1731457760593.1731457760827 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:31:37,983 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,41233,1731457759323/cb28580903fb%2C41233%2C1731457759323.meta.1731457760430.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:31:37,985 INFO [RS:0;cb28580903fb:35989 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
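
[Editor's note] The two Close-WAL-Writer-0 warnings above show RecoverLeaseFSUtils probing DistributedFileSystem.isFileClosed(...) through reflection and getting an InvocationTargetException because the underlying DFSClient has already been shut down; the utility logs the failed invocation and carries on. A rough illustration of that probe-and-tolerate pattern under those assumptions (not the project's actual code; only the method name and failure mode are taken from the stack trace):

    import java.lang.reflect.Method;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    final class IsFileClosedProbe {
      // Returns true only if the filesystem exposes isFileClosed(Path) and reports the file as closed.
      static boolean probe(FileSystem fs, Path path) {
        try {
          Method m = fs.getClass().getMethod("isFileClosed", Path.class);
          return (Boolean) m.invoke(fs, path);
        } catch (ReflectiveOperationException | RuntimeException e) {
          // e.g. InvocationTargetException caused by java.io.IOException: Filesystem closed (as logged above)
          return false; // treat as "unknown" so the caller can keep waiting or retrying
        }
      }
    }
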
2024-11-13T00:31:37,985 INFO [RS:0;cb28580903fb:35989 {}] regionserver.HRegionServer(1482): Serving as cb28580903fb,35989,1731457897712, RpcServer on cb28580903fb/172.17.0.2:35989, sessionid=0x1010ebcbf430001 2024-11-13T00:31:37,985 DEBUG [RS:0;cb28580903fb:35989 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-13T00:31:37,985 DEBUG [RS:0;cb28580903fb:35989 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager cb28580903fb,35989,1731457897712 2024-11-13T00:31:37,985 DEBUG [RS:0;cb28580903fb:35989 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'cb28580903fb,35989,1731457897712' 2024-11-13T00:31:37,985 DEBUG [RS:0;cb28580903fb:35989 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-13T00:31:37,986 DEBUG [RS:0;cb28580903fb:35989 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-13T00:31:37,986 DEBUG [RS:0;cb28580903fb:35989 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-13T00:31:37,986 DEBUG [RS:0;cb28580903fb:35989 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-13T00:31:37,986 DEBUG [RS:0;cb28580903fb:35989 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager cb28580903fb,35989,1731457897712 2024-11-13T00:31:37,986 DEBUG [RS:0;cb28580903fb:35989 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'cb28580903fb,35989,1731457897712' 2024-11-13T00:31:37,986 DEBUG [RS:0;cb28580903fb:35989 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-13T00:31:37,986 DEBUG [RS:0;cb28580903fb:35989 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-13T00:31:37,987 DEBUG [RS:0;cb28580903fb:35989 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-13T00:31:37,987 INFO [RS:0;cb28580903fb:35989 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-13T00:31:37,987 INFO [RS:0;cb28580903fb:35989 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-13T00:31:38,034 WARN [cb28580903fb:39625 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 
2024-11-13T00:31:38,089 INFO [RS:0;cb28580903fb:35989 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=cb28580903fb%2C35989%2C1731457897712, suffix=, logDir=hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/WALs/cb28580903fb,35989,1731457897712, archiveDir=hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/oldWALs, maxLogs=32 2024-11-13T00:31:38,090 INFO [RS:0;cb28580903fb:35989 {}] monitor.StreamSlowMonitor(122): New stream slow monitor cb28580903fb%2C35989%2C1731457897712.1731457898089 2024-11-13T00:31:38,095 INFO [RS:0;cb28580903fb:35989 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/WALs/cb28580903fb,35989,1731457897712/cb28580903fb%2C35989%2C1731457897712.1731457898089 2024-11-13T00:31:38,096 DEBUG [RS:0;cb28580903fb:35989 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37755:37755),(127.0.0.1/127.0.0.1:38763:38763)] 2024-11-13T00:31:38,284 DEBUG [cb28580903fb:39625 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-13T00:31:38,285 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=cb28580903fb,35989,1731457897712 2024-11-13T00:31:38,286 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as cb28580903fb,35989,1731457897712, state=OPENING 2024-11-13T00:31:38,288 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-13T00:31:38,289 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39625-0x1010ebcbf430000, quorum=127.0.0.1:63834, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T00:31:38,289 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35989-0x1010ebcbf430001, quorum=127.0.0.1:63834, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T00:31:38,290 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-13T00:31:38,290 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-13T00:31:38,290 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-13T00:31:38,290 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=cb28580903fb,35989,1731457897712}] 2024-11-13T00:31:38,443 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-13T00:31:38,445 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37187, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-13T00:31:38,448 INFO [RS_OPEN_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] 
handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-13T00:31:38,448 INFO [RS_OPEN_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-13T00:31:38,450 INFO [RS_OPEN_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=cb28580903fb%2C35989%2C1731457897712.meta, suffix=.meta, logDir=hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/WALs/cb28580903fb,35989,1731457897712, archiveDir=hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/oldWALs, maxLogs=32 2024-11-13T00:31:38,450 INFO [RS_OPEN_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor cb28580903fb%2C35989%2C1731457897712.meta.1731457898450.meta 2024-11-13T00:31:38,456 INFO [RS_OPEN_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/WALs/cb28580903fb,35989,1731457897712/cb28580903fb%2C35989%2C1731457897712.meta.1731457898450.meta 2024-11-13T00:31:38,457 DEBUG [RS_OPEN_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37755:37755),(127.0.0.1/127.0.0.1:38763:38763)] 2024-11-13T00:31:38,458 DEBUG [RS_OPEN_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-13T00:31:38,458 DEBUG [RS_OPEN_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-13T00:31:38,458 DEBUG [RS_OPEN_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-13T00:31:38,458 INFO [RS_OPEN_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
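
[Editor's note] The "WAL configuration: blocksize=256 MB, rollsize=128 MB, ..., maxLogs=32" lines above describe when the FSHLog provider rolls to a new WAL file, which is what this TestLogRolling run exercises. As far as I know these values map onto the properties sketched below, with rollsize = blocksize x roll multiplier; the property names are an assumption drawn from HBase documentation, not from this log:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    final class WalRollConfigSketch {
      static Configuration walRollConf() {
        Configuration conf = HBaseConfiguration.create();
        conf.setLong("hbase.regionserver.hlog.blocksize", 256L * 1024 * 1024); // blocksize=256 MB
        conf.setFloat("hbase.regionserver.logroll.multiplier", 0.5F);          // 256 MB * 0.5 = 128 MB rollsize
        conf.setInt("hbase.regionserver.maxlogs", 32);                         // maxLogs=32
        return conf;
      }
    }
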
2024-11-13T00:31:38,459 DEBUG [RS_OPEN_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-13T00:31:38,459 DEBUG [RS_OPEN_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T00:31:38,459 DEBUG [RS_OPEN_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-13T00:31:38,459 DEBUG [RS_OPEN_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-13T00:31:38,460 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-13T00:31:38,460 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-13T00:31:38,461 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T00:31:38,461 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T00:31:38,461 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-13T00:31:38,462 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-13T00:31:38,462 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T00:31:38,462 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T00:31:38,462 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-13T00:31:38,462 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-13T00:31:38,462 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T00:31:38,463 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T00:31:38,463 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-13T00:31:38,463 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-13T00:31:38,463 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T00:31:38,464 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
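
[Editor's note] Each CompactionConfiguration(183) line above echoes the effective compaction settings for one column family (minCompactSize 128 MB, 3-10 files per compaction, ratio 1.2, off-peak ratio 5.0, 7-day major period with 0.5 jitter). A hedged sketch of the configuration properties those values appear to correspond to; the property names are my mapping from current HBase documentation, not something stated in this log:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    final class CompactionConfSketch {
      static Configuration compactionConf() {
        Configuration conf = HBaseConfiguration.create();
        conf.setLong("hbase.hstore.compaction.min.size", 128L * 1024 * 1024); // minCompactSize:128 MB
        conf.setInt("hbase.hstore.compaction.min", 3);                        // minFilesToCompact:3
        conf.setInt("hbase.hstore.compaction.max", 10);                       // maxFilesToCompact:10
        conf.setFloat("hbase.hstore.compaction.ratio", 1.2F);                 // ratio 1.200000
        conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0F);         // off-peak ratio 5.000000
        conf.setLong("hbase.hregion.majorcompaction", 604_800_000L);          // major period: 7 days in ms
        conf.setFloat("hbase.hregion.majorcompaction.jitter", 0.5F);          // major jitter 0.500000
        return conf;
      }
    }
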
2024-11-13T00:31:38,464 DEBUG [RS_OPEN_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-13T00:31:38,464 DEBUG [RS_OPEN_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/hbase/meta/1588230740 2024-11-13T00:31:38,465 DEBUG [RS_OPEN_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/hbase/meta/1588230740 2024-11-13T00:31:38,466 DEBUG [RS_OPEN_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-13T00:31:38,467 DEBUG [RS_OPEN_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-13T00:31:38,467 DEBUG [RS_OPEN_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-13T00:31:38,468 DEBUG [RS_OPEN_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-13T00:31:38,469 INFO [RS_OPEN_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=745294, jitterRate=-0.0523100346326828}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-13T00:31:38,469 DEBUG [RS_OPEN_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-13T00:31:38,469 DEBUG [RS_OPEN_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731457898459Writing region info on filesystem at 1731457898459Initializing all the Stores at 1731457898459Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731457898459Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731457898460 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 
'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731457898460Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731457898460Cleaning up temporary data from old regions at 1731457898467 (+7 ms)Running coprocessor post-open hooks at 1731457898469 (+2 ms)Region opened successfully at 1731457898469 2024-11-13T00:31:38,470 INFO [RS_OPEN_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731457898443 2024-11-13T00:31:38,472 DEBUG [RS_OPEN_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-13T00:31:38,472 INFO [RS_OPEN_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-13T00:31:38,473 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=cb28580903fb,35989,1731457897712 2024-11-13T00:31:38,474 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as cb28580903fb,35989,1731457897712, state=OPEN 2024-11-13T00:31:38,480 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39625-0x1010ebcbf430000, quorum=127.0.0.1:63834, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-13T00:31:38,480 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35989-0x1010ebcbf430001, quorum=127.0.0.1:63834, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-13T00:31:38,480 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=cb28580903fb,35989,1731457897712 2024-11-13T00:31:38,480 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-13T00:31:38,480 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-13T00:31:38,483 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-13T00:31:38,483 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=cb28580903fb,35989,1731457897712 in 190 msec 2024-11-13T00:31:38,485 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-13T00:31:38,485 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 601 msec 2024-11-13T00:31:38,486 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, 
state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-13T00:31:38,486 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-13T00:31:38,487 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-13T00:31:38,487 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=cb28580903fb,35989,1731457897712, seqNum=-1] 2024-11-13T00:31:38,487 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-13T00:31:38,488 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43819, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-13T00:31:38,493 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 647 msec 2024-11-13T00:31:38,493 INFO [master/cb28580903fb:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731457898493, completionTime=-1 2024-11-13T00:31:38,493 INFO [master/cb28580903fb:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-13T00:31:38,493 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-13T00:31:38,495 INFO [master/cb28580903fb:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-13T00:31:38,495 INFO [master/cb28580903fb:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731457958495 2024-11-13T00:31:38,495 INFO [master/cb28580903fb:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731458018495 2024-11-13T00:31:38,495 INFO [master/cb28580903fb:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 1 msec 2024-11-13T00:31:38,495 INFO [master/cb28580903fb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=cb28580903fb,39625,1731457897665-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-13T00:31:38,495 INFO [master/cb28580903fb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=cb28580903fb,39625,1731457897665-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-13T00:31:38,495 INFO [master/cb28580903fb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=cb28580903fb,39625,1731457897665-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-13T00:31:38,495 INFO [master/cb28580903fb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-cb28580903fb:39625, period=300000, unit=MILLISECONDS is enabled. 
2024-11-13T00:31:38,495 INFO [master/cb28580903fb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-13T00:31:38,496 INFO [master/cb28580903fb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-13T00:31:38,497 DEBUG [master/cb28580903fb:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-13T00:31:38,499 INFO [master/cb28580903fb:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.757sec 2024-11-13T00:31:38,499 INFO [master/cb28580903fb:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-13T00:31:38,499 INFO [master/cb28580903fb:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-13T00:31:38,499 INFO [master/cb28580903fb:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-13T00:31:38,499 INFO [master/cb28580903fb:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-13T00:31:38,499 INFO [master/cb28580903fb:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-13T00:31:38,499 INFO [master/cb28580903fb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=cb28580903fb,39625,1731457897665-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-13T00:31:38,499 INFO [master/cb28580903fb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=cb28580903fb,39625,1731457897665-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-13T00:31:38,501 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-13T00:31:38,501 INFO [master/cb28580903fb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-13T00:31:38,501 INFO [master/cb28580903fb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=cb28580903fb,39625,1731457897665-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-13T00:31:38,526 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4110e616, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T00:31:38,526 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request cb28580903fb,39625,-1 for getting cluster id 2024-11-13T00:31:38,526 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-13T00:31:38,527 DEBUG [HMaster-EventLoopGroup-14-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'fc359955-a732-4ca7-b2bc-7606d5e1d8c6' 2024-11-13T00:31:38,527 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-13T00:31:38,527 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "fc359955-a732-4ca7-b2bc-7606d5e1d8c6" 2024-11-13T00:31:38,527 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@d8850d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T00:31:38,528 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [cb28580903fb,39625,-1] 2024-11-13T00:31:38,528 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-13T00:31:38,528 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T00:31:38,529 INFO [HMaster-EventLoopGroup-14-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39612, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-13T00:31:38,529 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@52331b46, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T00:31:38,530 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-13T00:31:38,531 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=cb28580903fb,35989,1731457897712, seqNum=-1] 2024-11-13T00:31:38,531 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-13T00:31:38,532 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57754, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-13T00:31:38,533 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=cb28580903fb,39625,1731457897665 2024-11-13T00:31:38,533 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T00:31:38,536 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-13T00:31:38,536 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-13T00:31:38,537 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.AsyncConnectionImpl(321): The fetched master address is cb28580903fb,39625,1731457897665 2024-11-13T00:31:38,537 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@7bd08b43 2024-11-13T00:31:38,537 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-13T00:31:38,538 INFO [HMaster-EventLoopGroup-14-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39628, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-13T00:31:38,538 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39625 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-13T00:31:38,538 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39625 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 2024-11-13T00:31:38,539 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39625 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-13T00:31:38,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39625 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRolling 2024-11-13T00:31:38,541 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-11-13T00:31:38,541 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T00:31:38,541 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39625 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRolling" procId is: 4 2024-11-13T00:31:38,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39625 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-13T00:31:38,542 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-13T00:31:38,548 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46003 is added to blk_1073741835_1011 (size=381) 2024-11-13T00:31:38,549 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43865 is added to blk_1073741835_1011 (size=381) 2024-11-13T00:31:38,550 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => d7ca01440872ba9aaac8181c0fcee4c0, NAME => 'TestLogRolling-testLogRolling,,1731457898538.d7ca01440872ba9aaac8181c0fcee4c0.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132 2024-11-13T00:31:38,555 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46003 is added to blk_1073741836_1012 (size=64) 2024-11-13T00:31:38,556 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43865 is added to blk_1073741836_1012 (size=64) 2024-11-13T00:31:38,556 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,,1731457898538.d7ca01440872ba9aaac8181c0fcee4c0.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T00:31:38,556 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1722): Closing d7ca01440872ba9aaac8181c0fcee4c0, disabling compactions & flushes 2024-11-13T00:31:38,556 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1731457898538.d7ca01440872ba9aaac8181c0fcee4c0. 2024-11-13T00:31:38,556 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1731457898538.d7ca01440872ba9aaac8181c0fcee4c0. 2024-11-13T00:31:38,556 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1731457898538.d7ca01440872ba9aaac8181c0fcee4c0. after waiting 0 ms 2024-11-13T00:31:38,556 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1731457898538.d7ca01440872ba9aaac8181c0fcee4c0. 2024-11-13T00:31:38,556 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1731457898538.d7ca01440872ba9aaac8181c0fcee4c0. 
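
[Editor's note] The master log above records "Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRolling'" with a single 'info' family, along with TableDescriptorChecker warnings that MAX_FILESIZE (786432) and MEMSTORE_FLUSHSIZE (8192) are unusually small, presumably deliberate so the test triggers frequent flushes, rolls, and splits. A hedged sketch of how a client could build an equivalent descriptor with the public HBase API; this is a reconstruction from the logged attributes, not the test's actual code:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    final class CreateTableSketch {
      static void create(Admin admin) throws java.io.IOException {
        ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(1)                 // VERSIONS => '1'
            .setBloomFilterType(BloomType.ROW) // BLOOMFILTER => 'ROW'
            .setBlocksize(64 * 1024)           // BLOCKSIZE => '65536 B (64KB)'
            .build();
        TableDescriptor td = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("TestLogRolling-testLogRolling"))
            .setColumnFamily(info)
            .setMaxFileSize(786432L)           // triggers the MAX_FILESIZE warning seen above
            .setMemStoreFlushSize(8192L)       // triggers the MEMSTORE_FLUSHSIZE warning seen above
            .build();
        admin.createTable(td);                 // surfaces as CreateTableProcedure pid=4 in the log
      }
    }
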
2024-11-13T00:31:38,556 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for d7ca01440872ba9aaac8181c0fcee4c0: Waiting for close lock at 1731457898556Disabling compacts and flushes for region at 1731457898556Disabling writes for close at 1731457898556Writing region close event to WAL at 1731457898556Closed at 1731457898556 2024-11-13T00:31:38,558 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_ADD_TO_META 2024-11-13T00:31:38,558 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRolling,,1731457898538.d7ca01440872ba9aaac8181c0fcee4c0.","families":{"info":[{"qualifier":"regioninfo","vlen":63,"tag":[],"timestamp":"1731457898558"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731457898558"}]},"ts":"1731457898558"} 2024-11-13T00:31:38,561 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 2024-11-13T00:31:38,562 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-13T00:31:38,562 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731457898562"}]},"ts":"1731457898562"} 2024-11-13T00:31:38,564 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRolling, state=ENABLING in hbase:meta 2024-11-13T00:31:38,564 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=d7ca01440872ba9aaac8181c0fcee4c0, ASSIGN}] 2024-11-13T00:31:38,565 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=d7ca01440872ba9aaac8181c0fcee4c0, ASSIGN 2024-11-13T00:31:38,566 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=d7ca01440872ba9aaac8181c0fcee4c0, ASSIGN; state=OFFLINE, location=cb28580903fb,35989,1731457897712; forceNewPlan=false, retain=false 2024-11-13T00:31:38,717 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=d7ca01440872ba9aaac8181c0fcee4c0, regionState=OPENING, regionLocation=cb28580903fb,35989,1731457897712 2024-11-13T00:31:38,718 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/MasterData/WALs/cb28580903fb,32917,1731457759225/cb28580903fb%2C32917%2C1731457759225.1731457759520 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:31:38,719 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=d7ca01440872ba9aaac8181c0fcee4c0, ASSIGN because future has completed 2024-11-13T00:31:38,720 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure d7ca01440872ba9aaac8181c0fcee4c0, server=cb28580903fb,35989,1731457897712}] 2024-11-13T00:31:38,876 INFO [RS_OPEN_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,,1731457898538.d7ca01440872ba9aaac8181c0fcee4c0. 
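The Close-WAL-Writer warnings above, repeated at roughly one-second intervals through the rest of this section, come from RecoverLeaseFSUtils probing isFileClosed reflectively against a DFSClient that has already been shut down for the old test-data directory, so the call surfaces as an InvocationTargetException wrapping "java.io.IOException: Filesystem closed". A minimal, hypothetical sketch of such a reflective probe (not the RecoverLeaseFSUtils source itself) looks like this:

import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

/** Hypothetical helper: reflectively asks a FileSystem whether a file is closed,
 *  tolerating implementations that do not expose isFileClosed. Illustrative only. */
final class IsFileClosedProbe {
  /** Returns TRUE/FALSE when the probe can be answered, or null when it cannot. */
  static Boolean probe(FileSystem fs, Path path) {
    try {
      Method isFileClosed = fs.getClass().getMethod("isFileClosed", Path.class);
      return (Boolean) isFileClosed.invoke(fs, path);
    } catch (NoSuchMethodException | IllegalAccessException e) {
      return null; // this FileSystem does not expose a usable isFileClosed
    } catch (InvocationTargetException e) {
      // The reflective target threw; with a closed DFSClient the cause is
      // IOException("Filesystem closed"), exactly what the WARN entries above record.
      return null;
    }
  }
}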
2024-11-13T00:31:38,876 DEBUG [RS_OPEN_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => d7ca01440872ba9aaac8181c0fcee4c0, NAME => 'TestLogRolling-testLogRolling,,1731457898538.d7ca01440872ba9aaac8181c0fcee4c0.', STARTKEY => '', ENDKEY => ''}
2024-11-13T00:31:38,876 DEBUG [RS_OPEN_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling d7ca01440872ba9aaac8181c0fcee4c0
2024-11-13T00:31:38,876 DEBUG [RS_OPEN_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,,1731457898538.d7ca01440872ba9aaac8181c0fcee4c0.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-11-13T00:31:38,876 DEBUG [RS_OPEN_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for d7ca01440872ba9aaac8181c0fcee4c0
2024-11-13T00:31:38,876 DEBUG [RS_OPEN_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for d7ca01440872ba9aaac8181c0fcee4c0
2024-11-13T00:31:38,877 INFO [StoreOpener-d7ca01440872ba9aaac8181c0fcee4c0-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region d7ca01440872ba9aaac8181c0fcee4c0
2024-11-13T00:31:38,879 INFO [StoreOpener-d7ca01440872ba9aaac8181c0fcee4c0-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region d7ca01440872ba9aaac8181c0fcee4c0 columnFamilyName info
2024-11-13T00:31:38,879 DEBUG [StoreOpener-d7ca01440872ba9aaac8181c0fcee4c0-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-13T00:31:38,879 INFO [StoreOpener-d7ca01440872ba9aaac8181c0fcee4c0-1 {}] regionserver.HStore(327): Store=d7ca01440872ba9aaac8181c0fcee4c0/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-11-13T00:31:38,879 DEBUG [RS_OPEN_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for d7ca01440872ba9aaac8181c0fcee4c0
2024-11-13T00:31:38,880 DEBUG [RS_OPEN_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/d7ca01440872ba9aaac8181c0fcee4c0
2024-11-13T00:31:38,880 DEBUG [RS_OPEN_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/d7ca01440872ba9aaac8181c0fcee4c0
2024-11-13T00:31:38,881 DEBUG [RS_OPEN_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for d7ca01440872ba9aaac8181c0fcee4c0
2024-11-13T00:31:38,881 DEBUG [RS_OPEN_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for d7ca01440872ba9aaac8181c0fcee4c0
2024-11-13T00:31:38,882 DEBUG [RS_OPEN_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for d7ca01440872ba9aaac8181c0fcee4c0
2024-11-13T00:31:38,884 DEBUG [RS_OPEN_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/d7ca01440872ba9aaac8181c0fcee4c0/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1
2024-11-13T00:31:38,884 INFO [RS_OPEN_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened d7ca01440872ba9aaac8181c0fcee4c0; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=698007, jitterRate=-0.11243930459022522}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1}
2024-11-13T00:31:38,884 DEBUG [RS_OPEN_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for d7ca01440872ba9aaac8181c0fcee4c0
2024-11-13T00:31:38,885 DEBUG [RS_OPEN_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for d7ca01440872ba9aaac8181c0fcee4c0: Running coprocessor pre-open hook at 1731457898876Writing region info on filesystem at 1731457898876Initializing all the Stores at 1731457898877 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731457898877Cleaning up temporary data from old regions at 1731457898881 (+4 ms)Running coprocessor post-open hooks at 1731457898884 (+3 ms)Region opened successfully at 1731457898885 (+1 ms)
2024-11-13T00:31:38,886 INFO [RS_OPEN_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,,1731457898538.d7ca01440872ba9aaac8181c0fcee4c0., pid=6, masterSystemTime=1731457898872
2024-11-13T00:31:38,888 DEBUG [RS_OPEN_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,,1731457898538.d7ca01440872ba9aaac8181c0fcee4c0.
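The "Opened d7ca01440872ba9aaac8181c0fcee4c0" record above reports a per-region, jittered split threshold (desiredMaxFileSize=698007 next to jitterRate=-0.112...). As a rough illustration of how such a threshold can be derived from a configured base size and a jitter fraction, here is a small self-contained sketch; the base size and jitter fraction are placeholder values, not numbers read from this test's configuration, and this is not the split-policy source itself.

import java.util.concurrent.ThreadLocalRandom;

/** Illustrative arithmetic only: derive a jittered split threshold of the kind
 *  reported in the "Opened ..." record above. Inputs are assumed example values. */
final class JitteredSplitThreshold {
  static long jitteredMaxFileSize(long configuredMaxFileSize, double jitterFraction) {
    // Draw a jitterRate uniformly from [-jitterFraction, +jitterFraction) and scale the base size.
    double jitterRate = (ThreadLocalRandom.current().nextDouble() - 0.5d) * 2.0d * jitterFraction;
    return (long) (configuredMaxFileSize * (1.0d + jitterRate));
  }

  public static void main(String[] args) {
    // Example: a small test-sized base with a 0.25 jitter fraction (placeholder values).
    System.out.println(jitteredMaxFileSize(786_432L, 0.25d));
  }
}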
2024-11-13T00:31:38,888 INFO [RS_OPEN_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,,1731457898538.d7ca01440872ba9aaac8181c0fcee4c0. 2024-11-13T00:31:38,889 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=d7ca01440872ba9aaac8181c0fcee4c0, regionState=OPEN, openSeqNum=2, regionLocation=cb28580903fb,35989,1731457897712 2024-11-13T00:31:38,891 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure d7ca01440872ba9aaac8181c0fcee4c0, server=cb28580903fb,35989,1731457897712 because future has completed 2024-11-13T00:31:38,894 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-13T00:31:38,894 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure d7ca01440872ba9aaac8181c0fcee4c0, server=cb28580903fb,35989,1731457897712 in 172 msec 2024-11-13T00:31:38,896 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-13T00:31:38,896 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=d7ca01440872ba9aaac8181c0fcee4c0, ASSIGN in 330 msec 2024-11-13T00:31:38,897 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-13T00:31:38,898 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731457898897"}]},"ts":"1731457898897"} 2024-11-13T00:31:38,899 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRolling, state=ENABLED in hbase:meta 2024-11-13T00:31:38,900 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_POST_OPERATION 2024-11-13T00:31:38,902 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRolling in 361 msec 2024-11-13T00:31:38,984 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,41233,1731457759323/cb28580903fb%2C41233%2C1731457759323.meta.1731457760430.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:31:38,984 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,35015,1731457760593/cb28580903fb%2C35015%2C1731457760593.1731457760827 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:31:39,509 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling 2024-11-13T00:31:39,509 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling Metrics about Tables on a single HBase RegionServer 2024-11-13T00:31:39,510 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-13T00:31:39,719 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/MasterData/WALs/cb28580903fb,32917,1731457759225/cb28580903fb%2C32917%2C1731457759225.1731457759520 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-13T00:31:39,984 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,35015,1731457760593/cb28580903fb%2C35015%2C1731457760593.1731457760827 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:31:39,984 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,41233,1731457759323/cb28580903fb%2C41233%2C1731457759323.meta.1731457760430.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:31:40,719 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/MasterData/WALs/cb28580903fb,32917,1731457759225/cb28580903fb%2C32917%2C1731457759225.1731457759520 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-13T00:31:40,985 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,41233,1731457759323/cb28580903fb%2C41233%2C1731457759323.meta.1731457760430.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:31:40,985 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,35015,1731457760593/cb28580903fb%2C35015%2C1731457760593.1731457760827 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:31:41,454 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T00:31:41,454 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T00:31:41,454 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T00:31:41,454 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T00:31:41,454 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T00:31:41,455 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T00:31:41,455 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T00:31:41,455 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T00:31:41,468 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T00:31:41,468 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T00:31:41,469 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T00:31:41,469 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T00:31:41,469 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T00:31:41,469 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T00:31:41,471 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T00:31:41,472 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T00:31:41,472 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T00:31:41,474 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T00:31:41,720 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/MasterData/WALs/cb28580903fb,32917,1731457759225/cb28580903fb%2C32917%2C1731457759225.1731457759520 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:31:41,977 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-13T00:31:41,978 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T00:31:41,979 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T00:31:41,979 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T00:31:41,979 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T00:31:41,979 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T00:31:41,979 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T00:31:41,980 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T00:31:41,980 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T00:31:41,985 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,41233,1731457759323/cb28580903fb%2C41233%2C1731457759323.meta.1731457760430.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:31:41,985 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,35015,1731457760593/cb28580903fb%2C35015%2C1731457760593.1731457760827 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:31:41,996 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T00:31:41,996 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T00:31:41,996 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T00:31:41,996 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T00:31:41,997 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T00:31:41,997 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T00:31:42,000 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T00:31:42,000 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T00:31:42,000 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T00:31:42,002 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T00:31:42,720 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/MasterData/WALs/cb28580903fb,32917,1731457759225/cb28580903fb%2C32917%2C1731457759225.1731457759520 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:31:42,986 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,35015,1731457760593/cb28580903fb%2C35015%2C1731457760593.1731457760827 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:31:42,986 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,41233,1731457759323/cb28580903fb%2C41233%2C1731457759323.meta.1731457760430.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:31:43,721 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/MasterData/WALs/cb28580903fb,32917,1731457759225/cb28580903fb%2C32917%2C1731457759225.1731457759520 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:31:43,953 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-13T00:31:43,953 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRolling' 2024-11-13T00:31:43,986 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,41233,1731457759323/cb28580903fb%2C41233%2C1731457759323.meta.1731457760430.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:31:43,986 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,35015,1731457760593/cb28580903fb%2C35015%2C1731457760593.1731457760827 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:31:44,721 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/MasterData/WALs/cb28580903fb,32917,1731457759225/cb28580903fb%2C32917%2C1731457759225.1731457759520 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:31:44,987 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,35015,1731457760593/cb28580903fb%2C35015%2C1731457760593.1731457760827 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:31:44,987 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,41233,1731457759323/cb28580903fb%2C41233%2C1731457759323.meta.1731457760430.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:31:45,722 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/MasterData/WALs/cb28580903fb,32917,1731457759225/cb28580903fb%2C32917%2C1731457759225.1731457759520 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:31:45,988 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,35015,1731457760593/cb28580903fb%2C35015%2C1731457760593.1731457760827 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:31:45,988 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,41233,1731457759323/cb28580903fb%2C41233%2C1731457759323.meta.1731457760430.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:31:46,722 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/MasterData/WALs/cb28580903fb,32917,1731457759225/cb28580903fb%2C32917%2C1731457759225.1731457759520 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:31:46,988 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,35015,1731457760593/cb28580903fb%2C35015%2C1731457760593.1731457760827 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:31:46,988 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,41233,1731457759323/cb28580903fb%2C41233%2C1731457759323.meta.1731457760430.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:31:47,723 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/MasterData/WALs/cb28580903fb,32917,1731457759225/cb28580903fb%2C32917%2C1731457759225.1731457759520 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:31:47,989 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,35015,1731457760593/cb28580903fb%2C35015%2C1731457760593.1731457760827 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:31:47,989 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,41233,1731457759323/cb28580903fb%2C41233%2C1731457759323.meta.1731457760430.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:31:48,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39625 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-13T00:31:48,645 INFO [RPCClient-NioEventLoopGroup-4-7 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRolling completed 2024-11-13T00:31:48,645 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRolling,, stopping at row=TestLogRolling-testLogRolling ,, for max=2147483647 with caching=100 2024-11-13T00:31:48,648 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRolling 2024-11-13T00:31:48,648 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRolling,,1731457898538.d7ca01440872ba9aaac8181c0fcee4c0. 
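The repeated WARN entries from util.RecoverLeaseFSUtils above all wrap the same root cause: the reflective call to DistributedFileSystem.isFileClosed() throws InvocationTargetException because the underlying DFSClient has already been closed, so DFSClient.checkOpen() fails with "Filesystem closed" and the Close-WAL-Writer thread keeps retrying roughly once per second against the same WAL paths. A minimal, hypothetical sketch of that kind of reflective probe is shown below; it is not the actual RecoverLeaseFSUtils code, and the class name IsFileClosedProbe and its "treat any failure as not-closed-yet" policy are illustrative assumptions only.

    import java.lang.reflect.InvocationTargetException;
    import java.lang.reflect.Method;

    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    /**
     * Illustration only (not the HBase implementation): probe FileSystem#isFileClosed
     * reflectively, since the method is provided by DistributedFileSystem but is not
     * part of the generic FileSystem contract in every Hadoop version.
     */
    public final class IsFileClosedProbe {

      private IsFileClosedProbe() {
      }

      /**
       * Returns true only when the filesystem reports the file as closed; any reflective
       * failure (method missing, or a wrapped "Filesystem closed" IOException like the
       * ones in the traces above) is treated as "not closed yet" so the caller retries.
       */
      public static boolean isFileClosed(FileSystem fs, Path path) {
        try {
          Method isFileClosed = fs.getClass().getMethod("isFileClosed", Path.class);
          return (Boolean) isFileClosed.invoke(fs, path);
        } catch (NoSuchMethodException e) {
          // This filesystem implementation does not expose isFileClosed at all.
          return false;
        } catch (IllegalAccessException | InvocationTargetException e) {
          // A closed DFSClient throws IOException("Filesystem closed") inside the target
          // method; reflection wraps it in InvocationTargetException, which is what the
          // WARN lines above record before the next poll.
          return false;
        }
      }
    }

Under a pattern like this the "Filesystem closed" IOException never propagates; it is logged at WARN and the caller simply waits for the next poll, which is consistent with the roughly one-second cadence of the repeated traces in this log.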
2024-11-13T00:31:48,650 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testLogRolling,,1731457898538.d7ca01440872ba9aaac8181c0fcee4c0., hostname=cb28580903fb,35989,1731457897712, seqNum=2] 2024-11-13T00:31:48,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35989 {}] regionserver.HRegion(8855): Flush requested on d7ca01440872ba9aaac8181c0fcee4c0 2024-11-13T00:31:48,662 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing d7ca01440872ba9aaac8181c0fcee4c0 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-13T00:31:48,679 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/d7ca01440872ba9aaac8181c0fcee4c0/.tmp/info/ee508f268c88429fa38b199bbf3bdc00 is 1080, key is row0001/info:/1731457908651/Put/seqid=0 2024-11-13T00:31:48,684 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43865 is added to blk_1073741837_1013 (size=12509) 2024-11-13T00:31:48,684 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46003 is added to blk_1073741837_1013 (size=12509) 2024-11-13T00:31:48,685 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/d7ca01440872ba9aaac8181c0fcee4c0/.tmp/info/ee508f268c88429fa38b199bbf3bdc00 2024-11-13T00:31:48,691 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/d7ca01440872ba9aaac8181c0fcee4c0/.tmp/info/ee508f268c88429fa38b199bbf3bdc00 as hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/d7ca01440872ba9aaac8181c0fcee4c0/info/ee508f268c88429fa38b199bbf3bdc00 2024-11-13T00:31:48,696 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/d7ca01440872ba9aaac8181c0fcee4c0/info/ee508f268c88429fa38b199bbf3bdc00, entries=7, sequenceid=11, filesize=12.2 K 2024-11-13T00:31:48,697 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=22.07 KB/22596 for d7ca01440872ba9aaac8181c0fcee4c0 in 36ms, sequenceid=11, compaction requested=false 2024-11-13T00:31:48,697 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for d7ca01440872ba9aaac8181c0fcee4c0: 2024-11-13T00:31:48,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35989 {}] regionserver.HRegion(8855): Flush requested on d7ca01440872ba9aaac8181c0fcee4c0 2024-11-13T00:31:48,698 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing d7ca01440872ba9aaac8181c0fcee4c0 1/1 column families, dataSize=24.17 KB heapSize=26.13 KB 2024-11-13T00:31:48,702 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/d7ca01440872ba9aaac8181c0fcee4c0/.tmp/info/3210294108084e778a80a3a15d0c9fcf is 1080, key is row0008/info:/1731457908662/Put/seqid=0 2024-11-13T00:31:48,706 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46003 is added to blk_1073741838_1014 (size=29761) 2024-11-13T00:31:48,706 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43865 is added to blk_1073741838_1014 (size=29761) 2024-11-13T00:31:48,707 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.17 KB at sequenceid=37 (bloomFilter=true), to=hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/d7ca01440872ba9aaac8181c0fcee4c0/.tmp/info/3210294108084e778a80a3a15d0c9fcf 2024-11-13T00:31:48,712 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/d7ca01440872ba9aaac8181c0fcee4c0/.tmp/info/3210294108084e778a80a3a15d0c9fcf as hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/d7ca01440872ba9aaac8181c0fcee4c0/info/3210294108084e778a80a3a15d0c9fcf 2024-11-13T00:31:48,716 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/d7ca01440872ba9aaac8181c0fcee4c0/info/3210294108084e778a80a3a15d0c9fcf, entries=23, sequenceid=37, filesize=29.1 K 2024-11-13T00:31:48,717 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~24.17 KB/24748, heapSize ~26.11 KB/26736, currentSize=2.10 KB/2152 for d7ca01440872ba9aaac8181c0fcee4c0 in 19ms, sequenceid=37, compaction requested=false 2024-11-13T00:31:48,717 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for d7ca01440872ba9aaac8181c0fcee4c0: 2024-11-13T00:31:48,717 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=41.3 K, sizeToCheck=16.0 K 2024-11-13T00:31:48,717 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-13T00:31:48,717 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/d7ca01440872ba9aaac8181c0fcee4c0/info/3210294108084e778a80a3a15d0c9fcf because midkey is the same as first or last row 2024-11-13T00:31:48,723 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/MasterData/WALs/cb28580903fb,32917,1731457759225/cb28580903fb%2C32917%2C1731457759225.1731457759520 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:31:48,989 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,41233,1731457759323/cb28580903fb%2C41233%2C1731457759323.meta.1731457760430.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:31:48,989 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,35015,1731457760593/cb28580903fb%2C35015%2C1731457760593.1731457760827 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:31:49,724 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/MasterData/WALs/cb28580903fb,32917,1731457759225/cb28580903fb%2C32917%2C1731457759225.1731457759520 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:31:49,990 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,35015,1731457760593/cb28580903fb%2C35015%2C1731457760593.1731457760827 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:31:49,990 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,41233,1731457759323/cb28580903fb%2C41233%2C1731457759323.meta.1731457760430.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-13T00:31:50,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35989 {}] regionserver.HRegion(8855): Flush requested on d7ca01440872ba9aaac8181c0fcee4c0 2024-11-13T00:31:50,711 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing d7ca01440872ba9aaac8181c0fcee4c0 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-13T00:31:50,715 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/d7ca01440872ba9aaac8181c0fcee4c0/.tmp/info/9bfa76355f8542899a30688962a4b211 is 1080, key is row0031/info:/1731457908699/Put/seqid=0 2024-11-13T00:31:50,720 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46003 is added to blk_1073741839_1015 (size=12509) 2024-11-13T00:31:50,720 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43865 is added to blk_1073741839_1015 (size=12509) 2024-11-13T00:31:50,720 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=47 (bloomFilter=true), to=hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/d7ca01440872ba9aaac8181c0fcee4c0/.tmp/info/9bfa76355f8542899a30688962a4b211 2024-11-13T00:31:50,724 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/MasterData/WALs/cb28580903fb,32917,1731457759225/cb28580903fb%2C32917%2C1731457759225.1731457759520 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:31:50,727 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/d7ca01440872ba9aaac8181c0fcee4c0/.tmp/info/9bfa76355f8542899a30688962a4b211 as hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/d7ca01440872ba9aaac8181c0fcee4c0/info/9bfa76355f8542899a30688962a4b211 2024-11-13T00:31:50,732 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/d7ca01440872ba9aaac8181c0fcee4c0/info/9bfa76355f8542899a30688962a4b211, entries=7, sequenceid=47, filesize=12.2 K 2024-11-13T00:31:50,733 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=12.61 KB/12912 for d7ca01440872ba9aaac8181c0fcee4c0 in 23ms, sequenceid=47, compaction requested=true 2024-11-13T00:31:50,733 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for d7ca01440872ba9aaac8181c0fcee4c0: 2024-11-13T00:31:50,733 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=53.5 K, sizeToCheck=16.0 K 2024-11-13T00:31:50,733 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-13T00:31:50,733 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/d7ca01440872ba9aaac8181c0fcee4c0/info/3210294108084e778a80a3a15d0c9fcf because midkey is the same as first or last row 2024-11-13T00:31:50,734 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d7ca01440872ba9aaac8181c0fcee4c0:info, priority=-2147483648, current under compaction store size is 1 2024-11-13T00:31:50,734 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-13T00:31:50,734 DEBUG [RS:0;cb28580903fb:35989-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-13T00:31:50,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35989 {}] regionserver.HRegion(8855): Flush requested on d7ca01440872ba9aaac8181c0fcee4c0 2024-11-13T00:31:50,734 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing d7ca01440872ba9aaac8181c0fcee4c0 1/1 column families, dataSize=13.66 KB heapSize=14.88 KB 2024-11-13T00:31:50,735 DEBUG [RS:0;cb28580903fb:35989-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 54779 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-13T00:31:50,735 DEBUG [RS:0;cb28580903fb:35989-shortCompactions-0 {}] regionserver.HStore(1541): d7ca01440872ba9aaac8181c0fcee4c0/info is 
initiating minor compaction (all files) 2024-11-13T00:31:50,735 INFO [RS:0;cb28580903fb:35989-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of d7ca01440872ba9aaac8181c0fcee4c0/info in TestLogRolling-testLogRolling,,1731457898538.d7ca01440872ba9aaac8181c0fcee4c0. 2024-11-13T00:31:50,735 INFO [RS:0;cb28580903fb:35989-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/d7ca01440872ba9aaac8181c0fcee4c0/info/ee508f268c88429fa38b199bbf3bdc00, hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/d7ca01440872ba9aaac8181c0fcee4c0/info/3210294108084e778a80a3a15d0c9fcf, hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/d7ca01440872ba9aaac8181c0fcee4c0/info/9bfa76355f8542899a30688962a4b211] into tmpdir=hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/d7ca01440872ba9aaac8181c0fcee4c0/.tmp, totalSize=53.5 K 2024-11-13T00:31:50,736 DEBUG [RS:0;cb28580903fb:35989-shortCompactions-0 {}] compactions.Compactor(225): Compacting ee508f268c88429fa38b199bbf3bdc00, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1731457908651 2024-11-13T00:31:50,736 DEBUG [RS:0;cb28580903fb:35989-shortCompactions-0 {}] compactions.Compactor(225): Compacting 3210294108084e778a80a3a15d0c9fcf, keycount=23, bloomtype=ROW, size=29.1 K, encoding=NONE, compression=NONE, seqNum=37, earliestPutTs=1731457908662 2024-11-13T00:31:50,737 DEBUG [RS:0;cb28580903fb:35989-shortCompactions-0 {}] compactions.Compactor(225): Compacting 9bfa76355f8542899a30688962a4b211, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=47, earliestPutTs=1731457908699 2024-11-13T00:31:50,739 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/d7ca01440872ba9aaac8181c0fcee4c0/.tmp/info/90a146b0f8a2436ca63750b76caea2ac is 1080, key is row0038/info:/1731457910712/Put/seqid=0 2024-11-13T00:31:50,744 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46003 is added to blk_1073741840_1016 (size=18987) 2024-11-13T00:31:50,745 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43865 is added to blk_1073741840_1016 (size=18987) 2024-11-13T00:31:50,745 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.66 KB at sequenceid=63 (bloomFilter=true), to=hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/d7ca01440872ba9aaac8181c0fcee4c0/.tmp/info/90a146b0f8a2436ca63750b76caea2ac 2024-11-13T00:31:50,750 INFO [RS:0;cb28580903fb:35989-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d7ca01440872ba9aaac8181c0fcee4c0#info#compaction#61 average throughput is 18.98 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-13T00:31:50,751 DEBUG [RS:0;cb28580903fb:35989-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/d7ca01440872ba9aaac8181c0fcee4c0/.tmp/info/83a6a15f537c49ada3c6d9786b79e174 is 1080, key is row0001/info:/1731457908651/Put/seqid=0 2024-11-13T00:31:50,752 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/d7ca01440872ba9aaac8181c0fcee4c0/.tmp/info/90a146b0f8a2436ca63750b76caea2ac as hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/d7ca01440872ba9aaac8181c0fcee4c0/info/90a146b0f8a2436ca63750b76caea2ac 2024-11-13T00:31:50,756 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46003 is added to blk_1073741841_1017 (size=44978) 2024-11-13T00:31:50,757 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43865 is added to blk_1073741841_1017 (size=44978) 2024-11-13T00:31:50,760 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/d7ca01440872ba9aaac8181c0fcee4c0/info/90a146b0f8a2436ca63750b76caea2ac, entries=13, sequenceid=63, filesize=18.5 K 2024-11-13T00:31:50,761 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~13.66 KB/13988, heapSize ~14.86 KB/15216, currentSize=12.61 KB/12912 for d7ca01440872ba9aaac8181c0fcee4c0 in 27ms, sequenceid=63, compaction requested=false 2024-11-13T00:31:50,761 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for d7ca01440872ba9aaac8181c0fcee4c0: 2024-11-13T00:31:50,761 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=72.0 K, sizeToCheck=16.0 K 2024-11-13T00:31:50,761 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-13T00:31:50,761 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/d7ca01440872ba9aaac8181c0fcee4c0/info/3210294108084e778a80a3a15d0c9fcf because midkey is the same as first or last row 2024-11-13T00:31:50,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35989 {}] regionserver.HRegion(8855): Flush requested on d7ca01440872ba9aaac8181c0fcee4c0 2024-11-13T00:31:50,763 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing d7ca01440872ba9aaac8181c0fcee4c0 1/1 column families, dataSize=13.66 KB heapSize=14.88 KB 2024-11-13T00:31:50,763 DEBUG [RS:0;cb28580903fb:35989-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/d7ca01440872ba9aaac8181c0fcee4c0/.tmp/info/83a6a15f537c49ada3c6d9786b79e174 as 
hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/d7ca01440872ba9aaac8181c0fcee4c0/info/83a6a15f537c49ada3c6d9786b79e174 2024-11-13T00:31:50,767 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/d7ca01440872ba9aaac8181c0fcee4c0/.tmp/info/d9eddf58c8fe456190cb6917bb65e0b1 is 1080, key is row0051/info:/1731457910735/Put/seqid=0 2024-11-13T00:31:50,769 INFO [RS:0;cb28580903fb:35989-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in d7ca01440872ba9aaac8181c0fcee4c0/info of d7ca01440872ba9aaac8181c0fcee4c0 into 83a6a15f537c49ada3c6d9786b79e174(size=43.9 K), total size for store is 62.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-13T00:31:50,769 DEBUG [RS:0;cb28580903fb:35989-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for d7ca01440872ba9aaac8181c0fcee4c0: 2024-11-13T00:31:50,769 INFO [RS:0;cb28580903fb:35989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1731457898538.d7ca01440872ba9aaac8181c0fcee4c0., storeName=d7ca01440872ba9aaac8181c0fcee4c0/info, priority=13, startTime=1731457910733; duration=0sec 2024-11-13T00:31:50,770 DEBUG [RS:0;cb28580903fb:35989-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=62.5 K, sizeToCheck=16.0 K 2024-11-13T00:31:50,770 DEBUG [RS:0;cb28580903fb:35989-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-13T00:31:50,770 DEBUG [RS:0;cb28580903fb:35989-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/d7ca01440872ba9aaac8181c0fcee4c0/info/83a6a15f537c49ada3c6d9786b79e174 because midkey is the same as first or last row 2024-11-13T00:31:50,770 DEBUG [RS:0;cb28580903fb:35989-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=62.5 K, sizeToCheck=16.0 K 2024-11-13T00:31:50,770 DEBUG [RS:0;cb28580903fb:35989-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-13T00:31:50,770 DEBUG [RS:0;cb28580903fb:35989-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/d7ca01440872ba9aaac8181c0fcee4c0/info/83a6a15f537c49ada3c6d9786b79e174 because midkey is the same as first or last row 2024-11-13T00:31:50,770 DEBUG [RS:0;cb28580903fb:35989-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=62.5 K, sizeToCheck=16.0 K 2024-11-13T00:31:50,770 DEBUG [RS:0;cb28580903fb:35989-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-13T00:31:50,770 DEBUG [RS:0;cb28580903fb:35989-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split 
hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/d7ca01440872ba9aaac8181c0fcee4c0/info/83a6a15f537c49ada3c6d9786b79e174 because midkey is the same as first or last row 2024-11-13T00:31:50,770 DEBUG [RS:0;cb28580903fb:35989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-13T00:31:50,770 DEBUG [RS:0;cb28580903fb:35989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d7ca01440872ba9aaac8181c0fcee4c0:info 2024-11-13T00:31:50,775 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43865 is added to blk_1073741842_1018 (size=18987) 2024-11-13T00:31:50,776 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46003 is added to blk_1073741842_1018 (size=18987) 2024-11-13T00:31:50,776 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.66 KB at sequenceid=79 (bloomFilter=true), to=hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/d7ca01440872ba9aaac8181c0fcee4c0/.tmp/info/d9eddf58c8fe456190cb6917bb65e0b1 2024-11-13T00:31:50,781 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/d7ca01440872ba9aaac8181c0fcee4c0/.tmp/info/d9eddf58c8fe456190cb6917bb65e0b1 as hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/d7ca01440872ba9aaac8181c0fcee4c0/info/d9eddf58c8fe456190cb6917bb65e0b1 2024-11-13T00:31:50,785 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/d7ca01440872ba9aaac8181c0fcee4c0/info/d9eddf58c8fe456190cb6917bb65e0b1, entries=13, sequenceid=79, filesize=18.5 K 2024-11-13T00:31:50,786 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~13.66 KB/13988, heapSize ~14.86 KB/15216, currentSize=1.05 KB/1076 for d7ca01440872ba9aaac8181c0fcee4c0 in 24ms, sequenceid=79, compaction requested=true 2024-11-13T00:31:50,786 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for d7ca01440872ba9aaac8181c0fcee4c0: 2024-11-13T00:31:50,786 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=81.0 K, sizeToCheck=16.0 K 2024-11-13T00:31:50,787 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-13T00:31:50,787 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/d7ca01440872ba9aaac8181c0fcee4c0/info/83a6a15f537c49ada3c6d9786b79e174 because midkey is the same as first or last row 2024-11-13T00:31:50,787 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d7ca01440872ba9aaac8181c0fcee4c0:info, priority=-2147483648, current under compaction store size is 1 2024-11-13T00:31:50,787 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-13T00:31:50,787 DEBUG [RS:0;cb28580903fb:35989-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-13T00:31:50,788 DEBUG [RS:0;cb28580903fb:35989-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 82952 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-13T00:31:50,788 DEBUG [RS:0;cb28580903fb:35989-shortCompactions-0 {}] regionserver.HStore(1541): d7ca01440872ba9aaac8181c0fcee4c0/info is initiating minor compaction (all files) 2024-11-13T00:31:50,788 INFO [RS:0;cb28580903fb:35989-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of d7ca01440872ba9aaac8181c0fcee4c0/info in TestLogRolling-testLogRolling,,1731457898538.d7ca01440872ba9aaac8181c0fcee4c0. 2024-11-13T00:31:50,788 INFO [RS:0;cb28580903fb:35989-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/d7ca01440872ba9aaac8181c0fcee4c0/info/83a6a15f537c49ada3c6d9786b79e174, hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/d7ca01440872ba9aaac8181c0fcee4c0/info/90a146b0f8a2436ca63750b76caea2ac, hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/d7ca01440872ba9aaac8181c0fcee4c0/info/d9eddf58c8fe456190cb6917bb65e0b1] into tmpdir=hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/d7ca01440872ba9aaac8181c0fcee4c0/.tmp, totalSize=81.0 K 2024-11-13T00:31:50,788 DEBUG [RS:0;cb28580903fb:35989-shortCompactions-0 {}] compactions.Compactor(225): Compacting 83a6a15f537c49ada3c6d9786b79e174, keycount=37, bloomtype=ROW, size=43.9 K, encoding=NONE, compression=NONE, seqNum=47, earliestPutTs=1731457908651 2024-11-13T00:31:50,788 DEBUG [RS:0;cb28580903fb:35989-shortCompactions-0 {}] compactions.Compactor(225): Compacting 90a146b0f8a2436ca63750b76caea2ac, keycount=13, bloomtype=ROW, size=18.5 K, encoding=NONE, compression=NONE, seqNum=63, earliestPutTs=1731457910712 2024-11-13T00:31:50,789 DEBUG [RS:0;cb28580903fb:35989-shortCompactions-0 {}] compactions.Compactor(225): Compacting d9eddf58c8fe456190cb6917bb65e0b1, keycount=13, bloomtype=ROW, size=18.5 K, encoding=NONE, compression=NONE, seqNum=79, earliestPutTs=1731457910735 2024-11-13T00:31:50,800 INFO [RS:0;cb28580903fb:35989-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d7ca01440872ba9aaac8181c0fcee4c0#info#compaction#63 average throughput is 21.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-13T00:31:50,801 DEBUG [RS:0;cb28580903fb:35989-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/d7ca01440872ba9aaac8181c0fcee4c0/.tmp/info/d2f57a9da59c428a9f552b25a2e381fa is 1080, key is row0001/info:/1731457908651/Put/seqid=0 2024-11-13T00:31:50,805 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46003 is added to blk_1073741843_1019 (size=73224) 2024-11-13T00:31:50,806 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43865 is added to blk_1073741843_1019 (size=73224) 2024-11-13T00:31:50,811 DEBUG [RS:0;cb28580903fb:35989-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/d7ca01440872ba9aaac8181c0fcee4c0/.tmp/info/d2f57a9da59c428a9f552b25a2e381fa as hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/d7ca01440872ba9aaac8181c0fcee4c0/info/d2f57a9da59c428a9f552b25a2e381fa 2024-11-13T00:31:50,818 INFO [RS:0;cb28580903fb:35989-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in d7ca01440872ba9aaac8181c0fcee4c0/info of d7ca01440872ba9aaac8181c0fcee4c0 into d2f57a9da59c428a9f552b25a2e381fa(size=71.5 K), total size for store is 71.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-13T00:31:50,818 DEBUG [RS:0;cb28580903fb:35989-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for d7ca01440872ba9aaac8181c0fcee4c0: 2024-11-13T00:31:50,818 INFO [RS:0;cb28580903fb:35989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1731457898538.d7ca01440872ba9aaac8181c0fcee4c0., storeName=d7ca01440872ba9aaac8181c0fcee4c0/info, priority=13, startTime=1731457910787; duration=0sec 2024-11-13T00:31:50,818 DEBUG [RS:0;cb28580903fb:35989-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=71.5 K, sizeToCheck=16.0 K 2024-11-13T00:31:50,818 DEBUG [RS:0;cb28580903fb:35989-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-13T00:31:50,818 DEBUG [RS:0;cb28580903fb:35989-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=71.5 K, sizeToCheck=16.0 K 2024-11-13T00:31:50,818 DEBUG [RS:0;cb28580903fb:35989-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-13T00:31:50,818 DEBUG [RS:0;cb28580903fb:35989-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=71.5 K, sizeToCheck=16.0 K 2024-11-13T00:31:50,818 DEBUG [RS:0;cb28580903fb:35989-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-13T00:31:50,819 DEBUG [RS:0;cb28580903fb:35989-shortCompactions-0 {}] regionserver.CompactSplit(239): Splitting TestLogRolling-testLogRolling,,1731457898538.d7ca01440872ba9aaac8181c0fcee4c0., 
compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-13T00:31:50,819 DEBUG [RS:0;cb28580903fb:35989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-13T00:31:50,819 DEBUG [RS:0;cb28580903fb:35989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d7ca01440872ba9aaac8181c0fcee4c0:info 2024-11-13T00:31:50,820 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39625 {}] assignment.AssignmentManager(1355): Split request from cb28580903fb,35989,1731457897712, parent={ENCODED => d7ca01440872ba9aaac8181c0fcee4c0, NAME => 'TestLogRolling-testLogRolling,,1731457898538.d7ca01440872ba9aaac8181c0fcee4c0.', STARTKEY => '', ENDKEY => ''}, splitKey=row0062 2024-11-13T00:31:50,825 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39625 {}] assignment.SplitTableRegionProcedure(223): Splittable=true state=OPEN, location=cb28580903fb,35989,1731457897712 2024-11-13T00:31:50,829 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39625 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=d7ca01440872ba9aaac8181c0fcee4c0, daughterA=7b90f83fab0270206e7a13b40fbba0e2, daughterB=accf212f08b962d4f9ef633e1c33d603 2024-11-13T00:31:50,830 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=d7ca01440872ba9aaac8181c0fcee4c0, daughterA=7b90f83fab0270206e7a13b40fbba0e2, daughterB=accf212f08b962d4f9ef633e1c33d603 2024-11-13T00:31:50,830 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=d7ca01440872ba9aaac8181c0fcee4c0, daughterA=7b90f83fab0270206e7a13b40fbba0e2, daughterB=accf212f08b962d4f9ef633e1c33d603 2024-11-13T00:31:50,830 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=d7ca01440872ba9aaac8181c0fcee4c0, daughterA=7b90f83fab0270206e7a13b40fbba0e2, daughterB=accf212f08b962d4f9ef633e1c33d603 2024-11-13T00:31:50,837 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=d7ca01440872ba9aaac8181c0fcee4c0, UNASSIGN}] 2024-11-13T00:31:50,839 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=d7ca01440872ba9aaac8181c0fcee4c0, UNASSIGN 2024-11-13T00:31:50,840 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=8 updating hbase:meta row=d7ca01440872ba9aaac8181c0fcee4c0, regionState=CLOSING, regionLocation=cb28580903fb,35989,1731457897712 2024-11-13T00:31:50,842 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, 
hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=d7ca01440872ba9aaac8181c0fcee4c0, UNASSIGN because future has completed 2024-11-13T00:31:50,843 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: true: evictOnSplit: true: evictOnClose: false 2024-11-13T00:31:50,843 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=9, ppid=8, state=RUNNABLE, hasLock=false; CloseRegionProcedure d7ca01440872ba9aaac8181c0fcee4c0, server=cb28580903fb,35989,1731457897712}] 2024-11-13T00:31:50,990 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,35015,1731457760593/cb28580903fb%2C35015%2C1731457760593.1731457760827 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:31:50,990 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,41233,1731457759323/cb28580903fb%2C41233%2C1731457759323.meta.1731457760430.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:31:51,000 INFO [RS_CLOSE_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] handler.UnassignRegionHandler(122): Close d7ca01440872ba9aaac8181c0fcee4c0 2024-11-13T00:31:51,000 DEBUG [RS_CLOSE_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] handler.UnassignRegionHandler(136): Unassign region: split region: true: evictCache: true 2024-11-13T00:31:51,001 DEBUG [RS_CLOSE_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1722): Closing d7ca01440872ba9aaac8181c0fcee4c0, disabling compactions & flushes 2024-11-13T00:31:51,001 INFO [RS_CLOSE_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1731457898538.d7ca01440872ba9aaac8181c0fcee4c0. 2024-11-13T00:31:51,001 DEBUG [RS_CLOSE_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1731457898538.d7ca01440872ba9aaac8181c0fcee4c0. 2024-11-13T00:31:51,001 DEBUG [RS_CLOSE_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1731457898538.d7ca01440872ba9aaac8181c0fcee4c0. after waiting 0 ms 2024-11-13T00:31:51,001 DEBUG [RS_CLOSE_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1731457898538.d7ca01440872ba9aaac8181c0fcee4c0. 
2024-11-13T00:31:51,001 INFO [RS_CLOSE_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(2902): Flushing d7ca01440872ba9aaac8181c0fcee4c0 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-13T00:31:51,005 DEBUG [RS_CLOSE_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/d7ca01440872ba9aaac8181c0fcee4c0/.tmp/info/9e7a8726f4384cc88eaa9ecd115fcc8e is 1080, key is row0064/info:/1731457910764/Put/seqid=0 2024-11-13T00:31:51,009 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46003 is added to blk_1073741844_1020 (size=6033) 2024-11-13T00:31:51,010 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43865 is added to blk_1073741844_1020 (size=6033) 2024-11-13T00:31:51,010 INFO [RS_CLOSE_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=85 (bloomFilter=true), to=hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/d7ca01440872ba9aaac8181c0fcee4c0/.tmp/info/9e7a8726f4384cc88eaa9ecd115fcc8e 2024-11-13T00:31:51,015 DEBUG [RS_CLOSE_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/d7ca01440872ba9aaac8181c0fcee4c0/.tmp/info/9e7a8726f4384cc88eaa9ecd115fcc8e as hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/d7ca01440872ba9aaac8181c0fcee4c0/info/9e7a8726f4384cc88eaa9ecd115fcc8e 2024-11-13T00:31:51,020 INFO [RS_CLOSE_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/d7ca01440872ba9aaac8181c0fcee4c0/info/9e7a8726f4384cc88eaa9ecd115fcc8e, entries=1, sequenceid=85, filesize=5.9 K 2024-11-13T00:31:51,021 INFO [RS_CLOSE_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for d7ca01440872ba9aaac8181c0fcee4c0 in 20ms, sequenceid=85, compaction requested=false 2024-11-13T00:31:51,022 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731457898538.d7ca01440872ba9aaac8181c0fcee4c0.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/d7ca01440872ba9aaac8181c0fcee4c0/info/ee508f268c88429fa38b199bbf3bdc00, hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/d7ca01440872ba9aaac8181c0fcee4c0/info/3210294108084e778a80a3a15d0c9fcf, hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/d7ca01440872ba9aaac8181c0fcee4c0/info/83a6a15f537c49ada3c6d9786b79e174, 
hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/d7ca01440872ba9aaac8181c0fcee4c0/info/9bfa76355f8542899a30688962a4b211, hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/d7ca01440872ba9aaac8181c0fcee4c0/info/90a146b0f8a2436ca63750b76caea2ac, hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/d7ca01440872ba9aaac8181c0fcee4c0/info/d9eddf58c8fe456190cb6917bb65e0b1] to archive 2024-11-13T00:31:51,023 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731457898538.d7ca01440872ba9aaac8181c0fcee4c0.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-13T00:31:51,025 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731457898538.d7ca01440872ba9aaac8181c0fcee4c0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/d7ca01440872ba9aaac8181c0fcee4c0/info/ee508f268c88429fa38b199bbf3bdc00 to hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/archive/data/default/TestLogRolling-testLogRolling/d7ca01440872ba9aaac8181c0fcee4c0/info/ee508f268c88429fa38b199bbf3bdc00 2024-11-13T00:31:51,026 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731457898538.d7ca01440872ba9aaac8181c0fcee4c0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/d7ca01440872ba9aaac8181c0fcee4c0/info/3210294108084e778a80a3a15d0c9fcf to hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/archive/data/default/TestLogRolling-testLogRolling/d7ca01440872ba9aaac8181c0fcee4c0/info/3210294108084e778a80a3a15d0c9fcf 2024-11-13T00:31:51,027 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731457898538.d7ca01440872ba9aaac8181c0fcee4c0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/d7ca01440872ba9aaac8181c0fcee4c0/info/83a6a15f537c49ada3c6d9786b79e174 to hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/archive/data/default/TestLogRolling-testLogRolling/d7ca01440872ba9aaac8181c0fcee4c0/info/83a6a15f537c49ada3c6d9786b79e174 2024-11-13T00:31:51,028 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731457898538.d7ca01440872ba9aaac8181c0fcee4c0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/d7ca01440872ba9aaac8181c0fcee4c0/info/9bfa76355f8542899a30688962a4b211 to hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/archive/data/default/TestLogRolling-testLogRolling/d7ca01440872ba9aaac8181c0fcee4c0/info/9bfa76355f8542899a30688962a4b211 2024-11-13T00:31:51,029 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731457898538.d7ca01440872ba9aaac8181c0fcee4c0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/d7ca01440872ba9aaac8181c0fcee4c0/info/90a146b0f8a2436ca63750b76caea2ac to 
hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/archive/data/default/TestLogRolling-testLogRolling/d7ca01440872ba9aaac8181c0fcee4c0/info/90a146b0f8a2436ca63750b76caea2ac 2024-11-13T00:31:51,031 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731457898538.d7ca01440872ba9aaac8181c0fcee4c0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/d7ca01440872ba9aaac8181c0fcee4c0/info/d9eddf58c8fe456190cb6917bb65e0b1 to hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/archive/data/default/TestLogRolling-testLogRolling/d7ca01440872ba9aaac8181c0fcee4c0/info/d9eddf58c8fe456190cb6917bb65e0b1 2024-11-13T00:31:51,036 DEBUG [RS_CLOSE_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/d7ca01440872ba9aaac8181c0fcee4c0/recovered.edits/88.seqid, newMaxSeqId=88, maxSeqId=1 2024-11-13T00:31:51,037 INFO [RS_CLOSE_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1731457898538.d7ca01440872ba9aaac8181c0fcee4c0. 2024-11-13T00:31:51,037 DEBUG [RS_CLOSE_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1676): Region close journal for d7ca01440872ba9aaac8181c0fcee4c0: Waiting for close lock at 1731457911001Running coprocessor pre-close hooks at 1731457911001Disabling compacts and flushes for region at 1731457911001Disabling writes for close at 1731457911001Obtaining lock to block concurrent updates at 1731457911001Preparing flush snapshotting stores in d7ca01440872ba9aaac8181c0fcee4c0 at 1731457911001Finished memstore snapshotting TestLogRolling-testLogRolling,,1731457898538.d7ca01440872ba9aaac8181c0fcee4c0., syncing WAL and waiting on mvcc, flushsize=dataSize=1076, getHeapSize=1392, getOffHeapSize=0, getCellsCount=1 at 1731457911001Flushing stores of TestLogRolling-testLogRolling,,1731457898538.d7ca01440872ba9aaac8181c0fcee4c0. 
at 1731457911002 (+1 ms)Flushing d7ca01440872ba9aaac8181c0fcee4c0/info: creating writer at 1731457911002Flushing d7ca01440872ba9aaac8181c0fcee4c0/info: appending metadata at 1731457911005 (+3 ms)Flushing d7ca01440872ba9aaac8181c0fcee4c0/info: closing flushed file at 1731457911005Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@15c8a23f: reopening flushed file at 1731457911015 (+10 ms)Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for d7ca01440872ba9aaac8181c0fcee4c0 in 20ms, sequenceid=85, compaction requested=false at 1731457911021 (+6 ms)Writing region close event to WAL at 1731457911033 (+12 ms)Running coprocessor post-close hooks at 1731457911037 (+4 ms)Closed at 1731457911037 2024-11-13T00:31:51,040 INFO [RS_CLOSE_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] handler.UnassignRegionHandler(157): Closed d7ca01440872ba9aaac8181c0fcee4c0 2024-11-13T00:31:51,040 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=8 updating hbase:meta row=d7ca01440872ba9aaac8181c0fcee4c0, regionState=CLOSED 2024-11-13T00:31:51,042 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=9, ppid=8, state=RUNNABLE, hasLock=false; CloseRegionProcedure d7ca01440872ba9aaac8181c0fcee4c0, server=cb28580903fb,35989,1731457897712 because future has completed 2024-11-13T00:31:51,045 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=9, resume processing ppid=8 2024-11-13T00:31:51,046 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, ppid=8, state=SUCCESS, hasLock=false; CloseRegionProcedure d7ca01440872ba9aaac8181c0fcee4c0, server=cb28580903fb,35989,1731457897712 in 200 msec 2024-11-13T00:31:51,048 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-11-13T00:31:51,048 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=d7ca01440872ba9aaac8181c0fcee4c0, UNASSIGN in 209 msec 2024-11-13T00:31:51,055 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T00:31:51,058 INFO [PEWorker-4 {}] assignment.SplitTableRegionProcedure(728): pid=7 splitting 2 storefiles, region=d7ca01440872ba9aaac8181c0fcee4c0, threads=2 2024-11-13T00:31:51,060 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/d7ca01440872ba9aaac8181c0fcee4c0/info/9e7a8726f4384cc88eaa9ecd115fcc8e for region: d7ca01440872ba9aaac8181c0fcee4c0 2024-11-13T00:31:51,060 DEBUG [StoreFileSplitter-pool-1 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/d7ca01440872ba9aaac8181c0fcee4c0/info/d2f57a9da59c428a9f552b25a2e381fa for region: d7ca01440872ba9aaac8181c0fcee4c0 2024-11-13T00:31:51,068 DEBUG [StoreFileSplitter-pool-0 {}] regionserver.HRegionFileSystem(650): Will create HFileLink file for 
hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/d7ca01440872ba9aaac8181c0fcee4c0/info/9e7a8726f4384cc88eaa9ecd115fcc8e, top=true 2024-11-13T00:31:51,072 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43865 is added to blk_1073741845_1021 (size=27) 2024-11-13T00:31:51,073 INFO [StoreFileSplitter-pool-0 {}] regionserver.HRegionFileSystem(691): Created linkFile:hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/accf212f08b962d4f9ef633e1c33d603/info/TestLogRolling-testLogRolling=d7ca01440872ba9aaac8181c0fcee4c0-9e7a8726f4384cc88eaa9ecd115fcc8e for child: accf212f08b962d4f9ef633e1c33d603, parent: d7ca01440872ba9aaac8181c0fcee4c0 2024-11-13T00:31:51,073 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46003 is added to blk_1073741845_1021 (size=27) 2024-11-13T00:31:51,073 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/d7ca01440872ba9aaac8181c0fcee4c0/info/9e7a8726f4384cc88eaa9ecd115fcc8e for region: d7ca01440872ba9aaac8181c0fcee4c0 2024-11-13T00:31:51,080 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46003 is added to blk_1073741846_1022 (size=27) 2024-11-13T00:31:51,081 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43865 is added to blk_1073741846_1022 (size=27) 2024-11-13T00:31:51,081 DEBUG [StoreFileSplitter-pool-1 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/d7ca01440872ba9aaac8181c0fcee4c0/info/d2f57a9da59c428a9f552b25a2e381fa for region: d7ca01440872ba9aaac8181c0fcee4c0 2024-11-13T00:31:51,083 DEBUG [PEWorker-4 {}] assignment.SplitTableRegionProcedure(802): pid=7 split storefiles for region d7ca01440872ba9aaac8181c0fcee4c0 Daughter A: [hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/7b90f83fab0270206e7a13b40fbba0e2/info/d2f57a9da59c428a9f552b25a2e381fa.d7ca01440872ba9aaac8181c0fcee4c0] storefiles, Daughter B: [hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/accf212f08b962d4f9ef633e1c33d603/info/TestLogRolling-testLogRolling=d7ca01440872ba9aaac8181c0fcee4c0-9e7a8726f4384cc88eaa9ecd115fcc8e, hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/accf212f08b962d4f9ef633e1c33d603/info/d2f57a9da59c428a9f552b25a2e381fa.d7ca01440872ba9aaac8181c0fcee4c0] storefiles. 
2024-11-13T00:31:51,089 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43865 is added to blk_1073741847_1023 (size=71) 2024-11-13T00:31:51,089 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46003 is added to blk_1073741847_1023 (size=71) 2024-11-13T00:31:51,092 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T00:31:51,103 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46003 is added to blk_1073741848_1024 (size=71) 2024-11-13T00:31:51,103 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43865 is added to blk_1073741848_1024 (size=71) 2024-11-13T00:31:51,105 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T00:31:51,113 DEBUG [PEWorker-4 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/7b90f83fab0270206e7a13b40fbba0e2/recovered.edits/88.seqid, newMaxSeqId=88, maxSeqId=-1 2024-11-13T00:31:51,116 DEBUG [PEWorker-4 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/accf212f08b962d4f9ef633e1c33d603/recovered.edits/88.seqid, newMaxSeqId=88, maxSeqId=-1 2024-11-13T00:31:51,119 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,,1731457898538.d7ca01440872ba9aaac8181c0fcee4c0.","families":{"info":[{"qualifier":"regioninfo","vlen":63,"tag":[],"timestamp":"1731457911118"},{"qualifier":"splitA","vlen":70,"tag":[],"timestamp":"1731457911118"},{"qualifier":"splitB","vlen":70,"tag":[],"timestamp":"1731457911118"}]},"ts":"1731457911118"} 2024-11-13T00:31:51,119 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,,1731457910825.7b90f83fab0270206e7a13b40fbba0e2.","families":{"info":[{"qualifier":"regioninfo","vlen":70,"tag":[],"timestamp":"1731457911118"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731457911118"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1731457911118"}]},"ts":"1731457911118"} 2024-11-13T00:31:51,119 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,row0062,1731457910825.accf212f08b962d4f9ef633e1c33d603.","families":{"info":[{"qualifier":"regioninfo","vlen":70,"tag":[],"timestamp":"1731457911118"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731457911118"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1731457911118"}]},"ts":"1731457911118"} 2024-11-13T00:31:51,138 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=7b90f83fab0270206e7a13b40fbba0e2, ASSIGN}, {pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, 
region=accf212f08b962d4f9ef633e1c33d603, ASSIGN}] 2024-11-13T00:31:51,139 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=7b90f83fab0270206e7a13b40fbba0e2, ASSIGN 2024-11-13T00:31:51,139 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=accf212f08b962d4f9ef633e1c33d603, ASSIGN 2024-11-13T00:31:51,140 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=7b90f83fab0270206e7a13b40fbba0e2, ASSIGN; state=SPLITTING_NEW, location=cb28580903fb,35989,1731457897712; forceNewPlan=false, retain=false 2024-11-13T00:31:51,140 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=accf212f08b962d4f9ef633e1c33d603, ASSIGN; state=SPLITTING_NEW, location=cb28580903fb,35989,1731457897712; forceNewPlan=false, retain=false 2024-11-13T00:31:51,291 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=10 updating hbase:meta row=7b90f83fab0270206e7a13b40fbba0e2, regionState=OPENING, regionLocation=cb28580903fb,35989,1731457897712 2024-11-13T00:31:51,291 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=11 updating hbase:meta row=accf212f08b962d4f9ef633e1c33d603, regionState=OPENING, regionLocation=cb28580903fb,35989,1731457897712 2024-11-13T00:31:51,293 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=7b90f83fab0270206e7a13b40fbba0e2, ASSIGN because future has completed 2024-11-13T00:31:51,294 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=12, ppid=10, state=RUNNABLE, hasLock=false; OpenRegionProcedure 7b90f83fab0270206e7a13b40fbba0e2, server=cb28580903fb,35989,1731457897712}] 2024-11-13T00:31:51,294 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=accf212f08b962d4f9ef633e1c33d603, ASSIGN because future has completed 2024-11-13T00:31:51,295 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=13, ppid=11, state=RUNNABLE, hasLock=false; OpenRegionProcedure accf212f08b962d4f9ef633e1c33d603, server=cb28580903fb,35989,1731457897712}] 2024-11-13T00:31:51,449 INFO [RS_OPEN_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,row0062,1731457910825.accf212f08b962d4f9ef633e1c33d603. 
2024-11-13T00:31:51,449 DEBUG [RS_OPEN_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7752): Opening region: {ENCODED => accf212f08b962d4f9ef633e1c33d603, NAME => 'TestLogRolling-testLogRolling,row0062,1731457910825.accf212f08b962d4f9ef633e1c33d603.', STARTKEY => 'row0062', ENDKEY => ''} 2024-11-13T00:31:51,449 DEBUG [RS_OPEN_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling accf212f08b962d4f9ef633e1c33d603 2024-11-13T00:31:51,450 DEBUG [RS_OPEN_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,row0062,1731457910825.accf212f08b962d4f9ef633e1c33d603.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T00:31:51,450 DEBUG [RS_OPEN_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7794): checking encryption for accf212f08b962d4f9ef633e1c33d603 2024-11-13T00:31:51,450 DEBUG [RS_OPEN_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7797): checking classloading for accf212f08b962d4f9ef633e1c33d603 2024-11-13T00:31:51,451 INFO [StoreOpener-accf212f08b962d4f9ef633e1c33d603-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region accf212f08b962d4f9ef633e1c33d603 2024-11-13T00:31:51,451 INFO [StoreOpener-accf212f08b962d4f9ef633e1c33d603-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region accf212f08b962d4f9ef633e1c33d603 columnFamilyName info 2024-11-13T00:31:51,452 DEBUG [StoreOpener-accf212f08b962d4f9ef633e1c33d603-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T00:31:51,460 DEBUG [StoreOpener-accf212f08b962d4f9ef633e1c33d603-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/accf212f08b962d4f9ef633e1c33d603/info/TestLogRolling-testLogRolling=d7ca01440872ba9aaac8181c0fcee4c0-9e7a8726f4384cc88eaa9ecd115fcc8e 2024-11-13T00:31:51,466 DEBUG [StoreOpener-accf212f08b962d4f9ef633e1c33d603-1 {}] regionserver.StoreEngine(278): loaded 
hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/accf212f08b962d4f9ef633e1c33d603/info/d2f57a9da59c428a9f552b25a2e381fa.d7ca01440872ba9aaac8181c0fcee4c0->hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/d7ca01440872ba9aaac8181c0fcee4c0/info/d2f57a9da59c428a9f552b25a2e381fa-top 2024-11-13T00:31:51,467 INFO [StoreOpener-accf212f08b962d4f9ef633e1c33d603-1 {}] regionserver.HStore(327): Store=accf212f08b962d4f9ef633e1c33d603/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T00:31:51,467 DEBUG [RS_OPEN_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1038): replaying wal for accf212f08b962d4f9ef633e1c33d603 2024-11-13T00:31:51,468 DEBUG [RS_OPEN_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/accf212f08b962d4f9ef633e1c33d603 2024-11-13T00:31:51,469 DEBUG [RS_OPEN_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/accf212f08b962d4f9ef633e1c33d603 2024-11-13T00:31:51,469 DEBUG [RS_OPEN_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1048): stopping wal replay for accf212f08b962d4f9ef633e1c33d603 2024-11-13T00:31:51,469 DEBUG [RS_OPEN_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1060): Cleaning up temporary data for accf212f08b962d4f9ef633e1c33d603 2024-11-13T00:31:51,470 DEBUG [RS_OPEN_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1093): writing seq id for accf212f08b962d4f9ef633e1c33d603 2024-11-13T00:31:51,471 INFO [RS_OPEN_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1114): Opened accf212f08b962d4f9ef633e1c33d603; next sequenceid=89; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=704730, jitterRate=-0.10389070212841034}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-13T00:31:51,471 DEBUG [RS_OPEN_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1122): Running coprocessor post-open hooks for accf212f08b962d4f9ef633e1c33d603 2024-11-13T00:31:51,471 DEBUG [RS_OPEN_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1006): Region open journal for accf212f08b962d4f9ef633e1c33d603: Running coprocessor pre-open hook at 1731457911450Writing region info on filesystem at 1731457911450Initializing all the Stores at 1731457911450Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 
1731457911450Cleaning up temporary data from old regions at 1731457911469 (+19 ms)Running coprocessor post-open hooks at 1731457911471 (+2 ms)Region opened successfully at 1731457911471 2024-11-13T00:31:51,472 INFO [RS_OPEN_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,row0062,1731457910825.accf212f08b962d4f9ef633e1c33d603., pid=13, masterSystemTime=1731457911446 2024-11-13T00:31:51,472 DEBUG [RS_OPEN_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.CompactSplit(403): Add compact mark for store accf212f08b962d4f9ef633e1c33d603:info, priority=-2147483648, current under compaction store size is 1 2024-11-13T00:31:51,472 DEBUG [RS_OPEN_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: Opening Region; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-13T00:31:51,472 DEBUG [RS:0;cb28580903fb:35989-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 2 store files, 0 compacting, 2 eligible, 16 blocking 2024-11-13T00:31:51,473 INFO [RS:0;cb28580903fb:35989-shortCompactions-0 {}] regionserver.HStore(1527): Keeping/Overriding Compaction request priority to -2147482648 for CF info since it belongs to recently split daughter region TestLogRolling-testLogRolling,row0062,1731457910825.accf212f08b962d4f9ef633e1c33d603. 2024-11-13T00:31:51,473 DEBUG [RS:0;cb28580903fb:35989-shortCompactions-0 {}] regionserver.HStore(1541): accf212f08b962d4f9ef633e1c33d603/info is initiating minor compaction (all files) 2024-11-13T00:31:51,473 INFO [RS:0;cb28580903fb:35989-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of accf212f08b962d4f9ef633e1c33d603/info in TestLogRolling-testLogRolling,row0062,1731457910825.accf212f08b962d4f9ef633e1c33d603. 
2024-11-13T00:31:51,474 INFO [RS:0;cb28580903fb:35989-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/accf212f08b962d4f9ef633e1c33d603/info/d2f57a9da59c428a9f552b25a2e381fa.d7ca01440872ba9aaac8181c0fcee4c0->hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/d7ca01440872ba9aaac8181c0fcee4c0/info/d2f57a9da59c428a9f552b25a2e381fa-top, hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/accf212f08b962d4f9ef633e1c33d603/info/TestLogRolling-testLogRolling=d7ca01440872ba9aaac8181c0fcee4c0-9e7a8726f4384cc88eaa9ecd115fcc8e] into tmpdir=hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/accf212f08b962d4f9ef633e1c33d603/.tmp, totalSize=77.4 K 2024-11-13T00:31:51,474 DEBUG [RS:0;cb28580903fb:35989-shortCompactions-0 {}] compactions.Compactor(225): Compacting d2f57a9da59c428a9f552b25a2e381fa.d7ca01440872ba9aaac8181c0fcee4c0, keycount=31, bloomtype=ROW, size=71.5 K, encoding=NONE, compression=NONE, seqNum=80, earliestPutTs=1731457908651 2024-11-13T00:31:51,474 DEBUG [RS_OPEN_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,row0062,1731457910825.accf212f08b962d4f9ef633e1c33d603. 2024-11-13T00:31:51,474 INFO [RS_OPEN_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,row0062,1731457910825.accf212f08b962d4f9ef633e1c33d603. 2024-11-13T00:31:51,475 DEBUG [RS:0;cb28580903fb:35989-shortCompactions-0 {}] compactions.Compactor(225): Compacting TestLogRolling-testLogRolling=d7ca01440872ba9aaac8181c0fcee4c0-9e7a8726f4384cc88eaa9ecd115fcc8e, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=85, earliestPutTs=1731457910764 2024-11-13T00:31:51,475 INFO [RS_OPEN_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,,1731457910825.7b90f83fab0270206e7a13b40fbba0e2. 
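Note on the reference files above: at split time the daughter region accf212f08b962d4f9ef633e1c33d603 does not receive a physical copy of the parent's HFile; it opens a "-top" reference that exposes only the parent file's keys at or above the split key ('row0062') until the compaction started above rewrites them into a real file (the other daughter gets the matching "-bottom" half). A minimal, self-contained Java sketch of that half-file idea follows, with a sorted in-memory map standing in for the HFile; the class and method names are illustrative assumptions, not HBase API.

import java.util.NavigableMap;
import java.util.TreeMap;

// Illustrative sketch: a "reference" exposes only the top or bottom half of a parent
// file's sorted key range, the way the '-top' reference in the log restricts the
// parent HFile to keys >= the split key until compaction rewrites the data.
public class HalfFileReference {
    enum Range { TOP, BOTTOM }

    private final NavigableMap<String, String> parent; // stands in for the parent HFile
    private final String splitKey;
    private final Range range;

    HalfFileReference(NavigableMap<String, String> parent, String splitKey, Range range) {
        this.parent = parent;
        this.splitKey = splitKey;
        this.range = range;
    }

    /** View of the parent limited to this daughter's half of the key space. */
    NavigableMap<String, String> view() {
        return range == Range.TOP
                ? parent.tailMap(splitKey, true)    // keys >= splitKey
                : parent.headMap(splitKey, false);  // keys <  splitKey
    }

    public static void main(String[] args) {
        NavigableMap<String, String> parent = new TreeMap<>();
        parent.put("row0001", "v1");
        parent.put("row0062", "v62");
        parent.put("row0090", "v90");

        HalfFileReference top = new HalfFileReference(parent, "row0062", Range.TOP);
        HalfFileReference bottom = new HalfFileReference(parent, "row0062", Range.BOTTOM);
        System.out.println("top half:    " + top.view().keySet());    // [row0062, row0090]
        System.out.println("bottom half: " + bottom.view().keySet()); // [row0001]
    }
}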
2024-11-13T00:31:51,475 DEBUG [RS_OPEN_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7752): Opening region: {ENCODED => 7b90f83fab0270206e7a13b40fbba0e2, NAME => 'TestLogRolling-testLogRolling,,1731457910825.7b90f83fab0270206e7a13b40fbba0e2.', STARTKEY => '', ENDKEY => 'row0062'} 2024-11-13T00:31:51,475 DEBUG [RS_OPEN_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling 7b90f83fab0270206e7a13b40fbba0e2 2024-11-13T00:31:51,475 DEBUG [RS_OPEN_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,,1731457910825.7b90f83fab0270206e7a13b40fbba0e2.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T00:31:51,475 DEBUG [RS_OPEN_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7794): checking encryption for 7b90f83fab0270206e7a13b40fbba0e2 2024-11-13T00:31:51,475 DEBUG [RS_OPEN_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7797): checking classloading for 7b90f83fab0270206e7a13b40fbba0e2 2024-11-13T00:31:51,475 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=11 updating hbase:meta row=accf212f08b962d4f9ef633e1c33d603, regionState=OPEN, openSeqNum=89, regionLocation=cb28580903fb,35989,1731457897712 2024-11-13T00:31:51,477 INFO [StoreOpener-7b90f83fab0270206e7a13b40fbba0e2-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 7b90f83fab0270206e7a13b40fbba0e2 2024-11-13T00:31:51,477 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35989 {}] regionserver.HRegion(8855): Flush requested on 1588230740 2024-11-13T00:31:51,477 DEBUG [MemStoreFlusher.0 {}] regionserver.FlushAllLargeStoresPolicy(69): Since none of the CFs were above the size, flushing all. 
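The FlushAllLargeStoresPolicy line above records the per-family flush decision for hbase:meta: flush only the column families whose memstore exceeds a lower bound, and fall back to flushing every family when none is individually large enough (hence "flushing all" and the "4/4 column families" flush that follows). Below is a small sketch of that rule, assuming a plain map of per-family memstore sizes; the names are illustrative, not the HBase classes themselves.

import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;

// Illustrative sketch of the decision the MemStoreFlusher logs above: flush only the
// column families whose memstore exceeds a lower bound, and fall back to flushing all
// families when none is individually large enough. Names are illustrative, not HBase API.
public class FlushPolicySketch {

    static List<String> familiesToFlush(Map<String, Long> memstoreSizes, long flushSizeLowerBound) {
        List<String> large = memstoreSizes.entrySet().stream()
                .filter(e -> e.getValue() > flushSizeLowerBound)
                .map(Map.Entry::getKey)
                .collect(Collectors.toList());
        // "Since none of the CFs were above the size, flushing all."
        return large.isEmpty() ? List.copyOf(memstoreSizes.keySet()) : large;
    }

    public static void main(String[] args) {
        Map<String, Long> sizes = Map.of("info", 1_200L, "ns", 74L, "table", 122L, "rep_barrier", 0L);
        System.out.println(familiesToFlush(sizes, 16 * 1024)); // none above the bound -> all four families
    }
}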
2024-11-13T00:31:51,477 INFO [StoreOpener-7b90f83fab0270206e7a13b40fbba0e2-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 7b90f83fab0270206e7a13b40fbba0e2 columnFamilyName info 2024-11-13T00:31:51,477 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=5.15 KB heapSize=9 KB 2024-11-13T00:31:51,477 DEBUG [StoreOpener-7b90f83fab0270206e7a13b40fbba0e2-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T00:31:51,477 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=13, ppid=11, state=RUNNABLE, hasLock=false; OpenRegionProcedure accf212f08b962d4f9ef633e1c33d603, server=cb28580903fb,35989,1731457897712 because future has completed 2024-11-13T00:31:51,481 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=13, resume processing ppid=11 2024-11-13T00:31:51,481 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=13, ppid=11, state=SUCCESS, hasLock=false; OpenRegionProcedure accf212f08b962d4f9ef633e1c33d603, server=cb28580903fb,35989,1731457897712 in 184 msec 2024-11-13T00:31:51,486 DEBUG [StoreOpener-7b90f83fab0270206e7a13b40fbba0e2-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/7b90f83fab0270206e7a13b40fbba0e2/info/d2f57a9da59c428a9f552b25a2e381fa.d7ca01440872ba9aaac8181c0fcee4c0->hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/d7ca01440872ba9aaac8181c0fcee4c0/info/d2f57a9da59c428a9f552b25a2e381fa-bottom 2024-11-13T00:31:51,487 INFO [StoreOpener-7b90f83fab0270206e7a13b40fbba0e2-1 {}] regionserver.HStore(327): Store=7b90f83fab0270206e7a13b40fbba0e2/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T00:31:51,487 DEBUG [RS_OPEN_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1038): replaying wal for 7b90f83fab0270206e7a13b40fbba0e2 2024-11-13T00:31:51,487 DEBUG [RS_OPEN_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/7b90f83fab0270206e7a13b40fbba0e2 2024-11-13T00:31:51,488 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=accf212f08b962d4f9ef633e1c33d603, ASSIGN in 343 msec 2024-11-13T00:31:51,489 
DEBUG [RS_OPEN_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/7b90f83fab0270206e7a13b40fbba0e2 2024-11-13T00:31:51,489 DEBUG [RS_OPEN_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1048): stopping wal replay for 7b90f83fab0270206e7a13b40fbba0e2 2024-11-13T00:31:51,489 DEBUG [RS_OPEN_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1060): Cleaning up temporary data for 7b90f83fab0270206e7a13b40fbba0e2 2024-11-13T00:31:51,491 DEBUG [RS_OPEN_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1093): writing seq id for 7b90f83fab0270206e7a13b40fbba0e2 2024-11-13T00:31:51,492 INFO [RS_OPEN_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1114): Opened 7b90f83fab0270206e7a13b40fbba0e2; next sequenceid=89; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=688394, jitterRate=-0.1246621310710907}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-13T00:31:51,492 DEBUG [RS_OPEN_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 7b90f83fab0270206e7a13b40fbba0e2 2024-11-13T00:31:51,492 DEBUG [RS_OPEN_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1006): Region open journal for 7b90f83fab0270206e7a13b40fbba0e2: Running coprocessor pre-open hook at 1731457911475Writing region info on filesystem at 1731457911475Initializing all the Stores at 1731457911476 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731457911476Cleaning up temporary data from old regions at 1731457911489 (+13 ms)Running coprocessor post-open hooks at 1731457911492 (+3 ms)Region opened successfully at 1731457911492 2024-11-13T00:31:51,493 INFO [RS_OPEN_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,,1731457910825.7b90f83fab0270206e7a13b40fbba0e2., pid=12, masterSystemTime=1731457911446 2024-11-13T00:31:51,493 DEBUG [RS_OPEN_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.CompactSplit(403): Add compact mark for store 7b90f83fab0270206e7a13b40fbba0e2:info, priority=-2147483648, current under compaction store size is 2 2024-11-13T00:31:51,493 DEBUG [RS_OPEN_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: Opening Region; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-13T00:31:51,493 DEBUG [RS:0;cb28580903fb:35989-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 1 store files, 0 compacting, 1 eligible, 16 blocking 2024-11-13T00:31:51,493 INFO 
[RS:0;cb28580903fb:35989-longCompactions-0 {}] regionserver.HStore(1527): Keeping/Overriding Compaction request priority to -2147482648 for CF info since it belongs to recently split daughter region TestLogRolling-testLogRolling,,1731457910825.7b90f83fab0270206e7a13b40fbba0e2. 2024-11-13T00:31:51,493 DEBUG [RS:0;cb28580903fb:35989-longCompactions-0 {}] regionserver.HStore(1541): 7b90f83fab0270206e7a13b40fbba0e2/info is initiating minor compaction (all files) 2024-11-13T00:31:51,494 INFO [RS:0;cb28580903fb:35989-longCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 7b90f83fab0270206e7a13b40fbba0e2/info in TestLogRolling-testLogRolling,,1731457910825.7b90f83fab0270206e7a13b40fbba0e2. 2024-11-13T00:31:51,494 INFO [RS:0;cb28580903fb:35989-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/7b90f83fab0270206e7a13b40fbba0e2/info/d2f57a9da59c428a9f552b25a2e381fa.d7ca01440872ba9aaac8181c0fcee4c0->hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/d7ca01440872ba9aaac8181c0fcee4c0/info/d2f57a9da59c428a9f552b25a2e381fa-bottom] into tmpdir=hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/7b90f83fab0270206e7a13b40fbba0e2/.tmp, totalSize=71.5 K 2024-11-13T00:31:51,494 DEBUG [RS:0;cb28580903fb:35989-longCompactions-0 {}] compactions.Compactor(225): Compacting d2f57a9da59c428a9f552b25a2e381fa.d7ca01440872ba9aaac8181c0fcee4c0, keycount=31, bloomtype=ROW, size=71.5 K, encoding=NONE, compression=NONE, seqNum=79, earliestPutTs=1731457908651 2024-11-13T00:31:51,495 DEBUG [RS_OPEN_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,,1731457910825.7b90f83fab0270206e7a13b40fbba0e2. 2024-11-13T00:31:51,495 INFO [RS_OPEN_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,,1731457910825.7b90f83fab0270206e7a13b40fbba0e2. 2024-11-13T00:31:51,495 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/hbase/meta/1588230740/.tmp/info/ec7f854ad3bb4db5b3b22975eb31d9c7 is 193, key is TestLogRolling-testLogRolling,row0062,1731457910825.accf212f08b962d4f9ef633e1c33d603./info:regioninfo/1731457911475/Put/seqid=0 2024-11-13T00:31:51,496 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=10 updating hbase:meta row=7b90f83fab0270206e7a13b40fbba0e2, regionState=OPEN, openSeqNum=89, regionLocation=cb28580903fb,35989,1731457897712 2024-11-13T00:31:51,496 INFO [RS:0;cb28580903fb:35989-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): accf212f08b962d4f9ef633e1c33d603#info#compaction#66 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-13T00:31:51,497 DEBUG [RS:0;cb28580903fb:35989-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/accf212f08b962d4f9ef633e1c33d603/.tmp/info/38d9257cdb684588a9036afc22f0d10c is 1080, key is row0062/info:/1731457910759/Put/seqid=0 2024-11-13T00:31:51,498 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=12, ppid=10, state=RUNNABLE, hasLock=false; OpenRegionProcedure 7b90f83fab0270206e7a13b40fbba0e2, server=cb28580903fb,35989,1731457897712 because future has completed 2024-11-13T00:31:51,501 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=12, resume processing ppid=10 2024-11-13T00:31:51,502 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=12, ppid=10, state=SUCCESS, hasLock=false; OpenRegionProcedure 7b90f83fab0270206e7a13b40fbba0e2, server=cb28580903fb,35989,1731457897712 in 206 msec 2024-11-13T00:31:51,502 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46003 is added to blk_1073741850_1026 (size=8359) 2024-11-13T00:31:51,502 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43865 is added to blk_1073741850_1026 (size=8359) 2024-11-13T00:31:51,504 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=10, resume processing ppid=7 2024-11-13T00:31:51,504 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=7b90f83fab0270206e7a13b40fbba0e2, ASSIGN in 364 msec 2024-11-13T00:31:51,507 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=d7ca01440872ba9aaac8181c0fcee4c0, daughterA=7b90f83fab0270206e7a13b40fbba0e2, daughterB=accf212f08b962d4f9ef633e1c33d603 in 679 msec 2024-11-13T00:31:51,507 DEBUG [RS:0;cb28580903fb:35989-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/accf212f08b962d4f9ef633e1c33d603/.tmp/info/38d9257cdb684588a9036afc22f0d10c as hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/accf212f08b962d4f9ef633e1c33d603/info/38d9257cdb684588a9036afc22f0d10c 2024-11-13T00:31:51,510 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46003 is added to blk_1073741849_1025 (size=9882) 2024-11-13T00:31:51,510 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43865 is added to blk_1073741849_1025 (size=9882) 2024-11-13T00:31:51,510 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.95 KB at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/hbase/meta/1588230740/.tmp/info/ec7f854ad3bb4db5b3b22975eb31d9c7 2024-11-13T00:31:51,514 INFO [RS:0;cb28580903fb:35989-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction 
of 2 (all) file(s) in accf212f08b962d4f9ef633e1c33d603/info of accf212f08b962d4f9ef633e1c33d603 into 38d9257cdb684588a9036afc22f0d10c(size=8.2 K), total size for store is 8.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-13T00:31:51,514 DEBUG [RS:0;cb28580903fb:35989-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for accf212f08b962d4f9ef633e1c33d603: 2024-11-13T00:31:51,514 INFO [RS:0;cb28580903fb:35989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731457910825.accf212f08b962d4f9ef633e1c33d603., storeName=accf212f08b962d4f9ef633e1c33d603/info, priority=14, startTime=1731457911472; duration=0sec 2024-11-13T00:31:51,514 DEBUG [RS:0;cb28580903fb:35989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-13T00:31:51,514 DEBUG [RS:0;cb28580903fb:35989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: accf212f08b962d4f9ef633e1c33d603:info 2024-11-13T00:31:51,515 INFO [RS:0;cb28580903fb:35989-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7b90f83fab0270206e7a13b40fbba0e2#info#compaction#67 average throughput is 20.87 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-13T00:31:51,515 DEBUG [RS:0;cb28580903fb:35989-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/7b90f83fab0270206e7a13b40fbba0e2/.tmp/info/a54ae4d7b6ca4c34af50a1e9292363f2 is 1080, key is row0001/info:/1731457908651/Put/seqid=0 2024-11-13T00:31:51,523 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43865 is added to blk_1073741851_1027 (size=70862) 2024-11-13T00:31:51,523 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46003 is added to blk_1073741851_1027 (size=70862) 2024-11-13T00:31:51,528 DEBUG [RS:0;cb28580903fb:35989-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/7b90f83fab0270206e7a13b40fbba0e2/.tmp/info/a54ae4d7b6ca4c34af50a1e9292363f2 as hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/7b90f83fab0270206e7a13b40fbba0e2/info/a54ae4d7b6ca4c34af50a1e9292363f2 2024-11-13T00:31:51,533 INFO [RS:0;cb28580903fb:35989-longCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 1 (all) file(s) in 7b90f83fab0270206e7a13b40fbba0e2/info of 7b90f83fab0270206e7a13b40fbba0e2 into a54ae4d7b6ca4c34af50a1e9292363f2(size=69.2 K), total size for store is 69.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
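The CompactionConfiguration entries earlier (ratio 1.200000, minFilesToCompact:3, maxFilesToCompact:10) describe the size-ratio rule that normally governs which store files a minor compaction selects; the two compactions above bypassed it only because stores of freshly split daughter regions are compacted at forced priority. The sketch below is a simplified illustration of that ratio test under those parameters, not HBase's ExploringCompactionPolicy code.

import java.util.ArrayList;
import java.util.List;

// Simplified sketch of the size-ratio test used by size-based minor-compaction policies
// like the one configured in the log (ratio 1.2, minFilesToCompact 3, maxFilesToCompact 10).
// This illustrates the rule only; it is not HBase's ExploringCompactionPolicy implementation.
public class RatioSelectionSketch {

    /** Returns the candidate files that pass the ratio test, in the order given. */
    static List<Long> select(List<Long> fileSizes, double ratio, int minFiles, int maxFiles) {
        List<Long> selected = new ArrayList<>();
        long total = fileSizes.stream().mapToLong(Long::longValue).sum();
        for (long size : fileSizes) {
            // A file stays in the selection only if it is not much larger than everything
            // else being compacted with it: size <= ratio * (sum of the other candidates).
            if (size <= ratio * (total - size)) {
                selected.add(size);
            } else {
                total -= size; // too large; drop it and re-evaluate the rest against a smaller sum
            }
            if (selected.size() == maxFiles) break;
        }
        return selected.size() >= minFiles ? selected : List.<Long>of();
    }

    public static void main(String[] args) {
        // Sizes roughly like the store files in the log: one 71.5 K file plus a few small flushes.
        List<Long> sizes = List.of(73_216L, 8_359L, 6_033L, 5_901L);
        System.out.println(select(sizes, 1.2, 3, 10)); // the 71.5 K file fails the ratio test
    }
}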
2024-11-13T00:31:51,534 DEBUG [RS:0;cb28580903fb:35989-longCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 7b90f83fab0270206e7a13b40fbba0e2: 2024-11-13T00:31:51,534 INFO [RS:0;cb28580903fb:35989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1731457910825.7b90f83fab0270206e7a13b40fbba0e2., storeName=7b90f83fab0270206e7a13b40fbba0e2/info, priority=15, startTime=1731457911493; duration=0sec 2024-11-13T00:31:51,534 DEBUG [RS:0;cb28580903fb:35989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-13T00:31:51,534 DEBUG [RS:0;cb28580903fb:35989-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7b90f83fab0270206e7a13b40fbba0e2:info 2024-11-13T00:31:51,536 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/hbase/meta/1588230740/.tmp/ns/d38e2748d9254c3fb04b693b69ff6071 is 43, key is default/ns:d/1731457898489/Put/seqid=0 2024-11-13T00:31:51,541 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43865 is added to blk_1073741852_1028 (size=5153) 2024-11-13T00:31:51,542 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46003 is added to blk_1073741852_1028 (size=5153) 2024-11-13T00:31:51,542 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/hbase/meta/1588230740/.tmp/ns/d38e2748d9254c3fb04b693b69ff6071 2024-11-13T00:31:51,561 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/hbase/meta/1588230740/.tmp/table/0d8d94276d174a379d0ee9e8e27ea5cb is 65, key is TestLogRolling-testLogRolling/table:state/1731457898897/Put/seqid=0 2024-11-13T00:31:51,565 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43865 is added to blk_1073741853_1029 (size=5340) 2024-11-13T00:31:51,566 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46003 is added to blk_1073741853_1029 (size=5340) 2024-11-13T00:31:51,566 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=122 B at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/hbase/meta/1588230740/.tmp/table/0d8d94276d174a379d0ee9e8e27ea5cb 2024-11-13T00:31:51,571 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/hbase/meta/1588230740/.tmp/info/ec7f854ad3bb4db5b3b22975eb31d9c7 as hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/hbase/meta/1588230740/info/ec7f854ad3bb4db5b3b22975eb31d9c7 2024-11-13T00:31:51,576 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/hbase/meta/1588230740/info/ec7f854ad3bb4db5b3b22975eb31d9c7, 
entries=30, sequenceid=17, filesize=9.7 K 2024-11-13T00:31:51,577 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/hbase/meta/1588230740/.tmp/ns/d38e2748d9254c3fb04b693b69ff6071 as hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/hbase/meta/1588230740/ns/d38e2748d9254c3fb04b693b69ff6071 2024-11-13T00:31:51,581 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/hbase/meta/1588230740/ns/d38e2748d9254c3fb04b693b69ff6071, entries=2, sequenceid=17, filesize=5.0 K 2024-11-13T00:31:51,581 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/hbase/meta/1588230740/.tmp/table/0d8d94276d174a379d0ee9e8e27ea5cb as hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/hbase/meta/1588230740/table/0d8d94276d174a379d0ee9e8e27ea5cb 2024-11-13T00:31:51,585 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/hbase/meta/1588230740/table/0d8d94276d174a379d0ee9e8e27ea5cb, entries=2, sequenceid=17, filesize=5.2 K 2024-11-13T00:31:51,586 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~5.15 KB/5269, heapSize ~8.70 KB/8912, currentSize=670 B/670 for 1588230740 in 109ms, sequenceid=17, compaction requested=false 2024-11-13T00:31:51,586 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-11-13T00:31:51,725 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/MasterData/WALs/cb28580903fb,32917,1731457759225/cb28580903fb%2C32917%2C1731457759225.1731457759520 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:31:51,991 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,35015,1731457760593/cb28580903fb%2C35015%2C1731457760593.1731457760827 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:31:51,991 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,41233,1731457759323/cb28580903fb%2C41233%2C1731457759323.meta.1731457760430.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:31:52,725 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/MasterData/WALs/cb28580903fb,32917,1731457759225/cb28580903fb%2C32917%2C1731457759225.1731457759520 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:31:52,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35989 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:57754 deadline: 1731457922766, exception=org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1731457898538.d7ca01440872ba9aaac8181c0fcee4c0. is not online on cb28580903fb,35989,1731457897712 2024-11-13T00:31:52,789 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,,1731457898538.d7ca01440872ba9aaac8181c0fcee4c0., hostname=cb28580903fb,35989,1731457897712, seqNum=2 , the old value is region=TestLogRolling-testLogRolling,,1731457898538.d7ca01440872ba9aaac8181c0fcee4c0., hostname=cb28580903fb,35989,1731457897712, seqNum=2, error=org.apache.hadoop.hbase.NotServingRegionException: org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1731457898538.d7ca01440872ba9aaac8181c0fcee4c0. is not online on cb28580903fb,35989,1731457897712 at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegionByEncodedName(HRegionServer.java:3186) at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegion(HRegionServer.java:3164) at org.apache.hadoop.hbase.regionserver.RSRpcServices.getRegion(RSRpcServices.java:1413) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2943) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-13T00:31:52,790 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,,1731457898538.d7ca01440872ba9aaac8181c0fcee4c0., hostname=cb28580903fb,35989,1731457897712, seqNum=2 is org.apache.hadoop.hbase.NotServingRegionException: org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1731457898538.d7ca01440872ba9aaac8181c0fcee4c0. 
is not online on cb28580903fb,35989,1731457897712 at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegionByEncodedName(HRegionServer.java:3186) at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegion(HRegionServer.java:3164) at org.apache.hadoop.hbase.regionserver.RSRpcServices.getRegion(RSRpcServices.java:1413) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2943) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-13T00:31:52,790 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(88): Try removing region=TestLogRolling-testLogRolling,,1731457898538.d7ca01440872ba9aaac8181c0fcee4c0., hostname=cb28580903fb,35989,1731457897712, seqNum=2 from cache 2024-11-13T00:31:52,991 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,41233,1731457759323/cb28580903fb%2C41233%2C1731457759323.meta.1731457760430.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-13T00:31:52,991 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,35015,1731457760593/cb28580903fb%2C35015%2C1731457760593.1731457760827 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:31:53,726 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/MasterData/WALs/cb28580903fb,32917,1731457759225/cb28580903fb%2C32917%2C1731457759225.1731457759520 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:31:53,992 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,41233,1731457759323/cb28580903fb%2C41233%2C1731457759323.meta.1731457760430.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-13T00:31:53,992 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,35015,1731457760593/cb28580903fb%2C35015%2C1731457760593.1731457760827 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:31:54,726 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/MasterData/WALs/cb28580903fb,32917,1731457759225/cb28580903fb%2C32917%2C1731457759225.1731457759520 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:31:54,992 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,41233,1731457759323/cb28580903fb%2C41233%2C1731457759323.meta.1731457760430.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-13T00:31:54,992 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,35015,1731457760593/cb28580903fb%2C35015%2C1731457760593.1731457760827 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:31:55,727 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/MasterData/WALs/cb28580903fb,32917,1731457759225/cb28580903fb%2C32917%2C1731457759225.1731457759520 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:31:55,993 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,41233,1731457759323/cb28580903fb%2C41233%2C1731457759323.meta.1731457760430.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-13T00:31:55,993 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,35015,1731457760593/cb28580903fb%2C35015%2C1731457760593.1731457760827 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:31:56,038 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T00:31:56,038 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T00:31:56,038 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T00:31:56,038 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T00:31:56,038 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T00:31:56,038 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T00:31:56,039 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T00:31:56,039 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T00:31:56,062 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T00:31:56,063 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T00:31:56,063 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T00:31:56,063 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T00:31:56,063 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T00:31:56,064 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T00:31:56,069 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T00:31:56,069 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T00:31:56,069 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T00:31:56,072 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T00:31:56,582 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-13T00:31:56,583 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T00:31:56,583 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T00:31:56,583 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T00:31:56,583 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T00:31:56,583 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T00:31:56,583 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T00:31:56,584 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T00:31:56,584 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T00:31:56,601 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T00:31:56,601 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T00:31:56,601 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T00:31:56,602 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T00:31:56,602 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T00:31:56,602 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T00:31:56,606 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T00:31:56,606 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T00:31:56,606 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T00:31:56,609 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-13T00:31:56,727 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/MasterData/WALs/cb28580903fb,32917,1731457759225/cb28580903fb%2C32917%2C1731457759225.1731457759520 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:31:56,994 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,35015,1731457760593/cb28580903fb%2C35015%2C1731457760593.1731457760827 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:31:56,994 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,41233,1731457759323/cb28580903fb%2C41233%2C1731457759323.meta.1731457760430.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:31:57,728 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/MasterData/WALs/cb28580903fb,32917,1731457759225/cb28580903fb%2C32917%2C1731457759225.1731457759520 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:31:57,994 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,41233,1731457759323/cb28580903fb%2C41233%2C1731457759323.meta.1731457760430.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:31:57,994 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,35015,1731457760593/cb28580903fb%2C35015%2C1731457760593.1731457760827 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:31:58,728 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/MasterData/WALs/cb28580903fb,32917,1731457759225/cb28580903fb%2C32917%2C1731457759225.1731457759520 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:31:58,995 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,41233,1731457759323/cb28580903fb%2C41233%2C1731457759323.meta.1731457760430.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:31:58,995 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,35015,1731457760593/cb28580903fb%2C35015%2C1731457760593.1731457760827 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:31:59,729 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/MasterData/WALs/cb28580903fb,32917,1731457759225/cb28580903fb%2C32917%2C1731457759225.1731457759520 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:31:59,995 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,41233,1731457759323/cb28580903fb%2C41233%2C1731457759323.meta.1731457760430.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:31:59,995 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,35015,1731457760593/cb28580903fb%2C35015%2C1731457760593.1731457760827 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:32:00,729 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/MasterData/WALs/cb28580903fb,32917,1731457759225/cb28580903fb%2C32917%2C1731457759225.1731457759520 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:32:00,996 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,35015,1731457760593/cb28580903fb%2C35015%2C1731457760593.1731457760827 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:32:00,996 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,41233,1731457759323/cb28580903fb%2C41233%2C1731457759323.meta.1731457760430.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:32:01,730 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/MasterData/WALs/cb28580903fb,32917,1731457759225/cb28580903fb%2C32917%2C1731457759225.1731457759520 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:32:01,996 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,41233,1731457759323/cb28580903fb%2C41233%2C1731457759323.meta.1731457760430.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:32:01,996 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,35015,1731457760593/cb28580903fb%2C35015%2C1731457760593.1731457760827 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:32:02,731 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/MasterData/WALs/cb28580903fb,32917,1731457759225/cb28580903fb%2C32917%2C1731457759225.1731457759520 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:32:02,871 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRolling', row='row0065', locateType=CURRENT is [region=TestLogRolling-testLogRolling,row0062,1731457910825.accf212f08b962d4f9ef633e1c33d603., hostname=cb28580903fb,35989,1731457897712, seqNum=89] 2024-11-13T00:32:02,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35989 {}] regionserver.HRegion(8855): Flush requested on accf212f08b962d4f9ef633e1c33d603 2024-11-13T00:32:02,883 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing accf212f08b962d4f9ef633e1c33d603 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-13T00:32:02,887 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/accf212f08b962d4f9ef633e1c33d603/.tmp/info/a722e576a4874194babf899d4cc640cb is 1080, key is row0065/info:/1731457922872/Put/seqid=0 2024-11-13T00:32:02,892 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46003 is added to blk_1073741854_1030 (size=12509) 2024-11-13T00:32:02,893 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43865 is added to blk_1073741854_1030 (size=12509) 2024-11-13T00:32:02,893 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=99 (bloomFilter=true), to=hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/accf212f08b962d4f9ef633e1c33d603/.tmp/info/a722e576a4874194babf899d4cc640cb 2024-11-13T00:32:02,898 DEBUG [MemStoreFlusher.0 {}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/accf212f08b962d4f9ef633e1c33d603/.tmp/info/a722e576a4874194babf899d4cc640cb as hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/accf212f08b962d4f9ef633e1c33d603/info/a722e576a4874194babf899d4cc640cb 2024-11-13T00:32:02,903 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/accf212f08b962d4f9ef633e1c33d603/info/a722e576a4874194babf899d4cc640cb, entries=7, sequenceid=99, filesize=12.2 K 2024-11-13T00:32:02,904 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=13.66 KB/13988 for accf212f08b962d4f9ef633e1c33d603 in 22ms, sequenceid=99, compaction requested=false 2024-11-13T00:32:02,904 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for accf212f08b962d4f9ef633e1c33d603: 2024-11-13T00:32:02,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35989 {}] regionserver.HRegion(8855): Flush requested on accf212f08b962d4f9ef633e1c33d603 2024-11-13T00:32:02,905 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing accf212f08b962d4f9ef633e1c33d603 1/1 column families, dataSize=14.71 KB heapSize=16 KB 2024-11-13T00:32:02,909 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/accf212f08b962d4f9ef633e1c33d603/.tmp/info/3421acaefd2b49749f5d4a19ecf4bc66 is 1080, key is row0072/info:/1731457922884/Put/seqid=0 2024-11-13T00:32:02,913 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46003 is added to blk_1073741855_1031 (size=20064) 2024-11-13T00:32:02,914 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43865 is added to blk_1073741855_1031 (size=20064) 2024-11-13T00:32:02,914 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=14.71 KB at sequenceid=116 (bloomFilter=true), to=hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/accf212f08b962d4f9ef633e1c33d603/.tmp/info/3421acaefd2b49749f5d4a19ecf4bc66 2024-11-13T00:32:02,920 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/accf212f08b962d4f9ef633e1c33d603/.tmp/info/3421acaefd2b49749f5d4a19ecf4bc66 as hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/accf212f08b962d4f9ef633e1c33d603/info/3421acaefd2b49749f5d4a19ecf4bc66 2024-11-13T00:32:02,925 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/accf212f08b962d4f9ef633e1c33d603/info/3421acaefd2b49749f5d4a19ecf4bc66, entries=14, sequenceid=116, filesize=19.6 K 2024-11-13T00:32:02,926 INFO [MemStoreFlusher.0 {}] 
regionserver.HRegion(3140): Finished flush of dataSize ~14.71 KB/15064, heapSize ~15.98 KB/16368, currentSize=11.56 KB/11836 for accf212f08b962d4f9ef633e1c33d603 in 21ms, sequenceid=116, compaction requested=true 2024-11-13T00:32:02,926 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for accf212f08b962d4f9ef633e1c33d603: 2024-11-13T00:32:02,926 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store accf212f08b962d4f9ef633e1c33d603:info, priority=-2147483648, current under compaction store size is 1 2024-11-13T00:32:02,926 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-13T00:32:02,926 DEBUG [RS:0;cb28580903fb:35989-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-13T00:32:02,927 DEBUG [RS:0;cb28580903fb:35989-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 40932 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-13T00:32:02,927 DEBUG [RS:0;cb28580903fb:35989-shortCompactions-0 {}] regionserver.HStore(1541): accf212f08b962d4f9ef633e1c33d603/info is initiating minor compaction (all files) 2024-11-13T00:32:02,927 INFO [RS:0;cb28580903fb:35989-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of accf212f08b962d4f9ef633e1c33d603/info in TestLogRolling-testLogRolling,row0062,1731457910825.accf212f08b962d4f9ef633e1c33d603. 2024-11-13T00:32:02,927 INFO [RS:0;cb28580903fb:35989-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/accf212f08b962d4f9ef633e1c33d603/info/38d9257cdb684588a9036afc22f0d10c, hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/accf212f08b962d4f9ef633e1c33d603/info/a722e576a4874194babf899d4cc640cb, hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/accf212f08b962d4f9ef633e1c33d603/info/3421acaefd2b49749f5d4a19ecf4bc66] into tmpdir=hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/accf212f08b962d4f9ef633e1c33d603/.tmp, totalSize=40.0 K 2024-11-13T00:32:02,928 DEBUG [RS:0;cb28580903fb:35989-shortCompactions-0 {}] compactions.Compactor(225): Compacting 38d9257cdb684588a9036afc22f0d10c, keycount=3, bloomtype=ROW, size=8.2 K, encoding=NONE, compression=NONE, seqNum=85, earliestPutTs=1731457910759 2024-11-13T00:32:02,928 DEBUG [RS:0;cb28580903fb:35989-shortCompactions-0 {}] compactions.Compactor(225): Compacting a722e576a4874194babf899d4cc640cb, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=99, earliestPutTs=1731457922872 2024-11-13T00:32:02,929 DEBUG [RS:0;cb28580903fb:35989-shortCompactions-0 {}] compactions.Compactor(225): Compacting 3421acaefd2b49749f5d4a19ecf4bc66, keycount=14, bloomtype=ROW, size=19.6 K, encoding=NONE, compression=NONE, seqNum=116, earliestPutTs=1731457922884 2024-11-13T00:32:02,938 INFO [RS:0;cb28580903fb:35989-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
accf212f08b962d4f9ef633e1c33d603#info#compaction#72 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-13T00:32:02,939 DEBUG [RS:0;cb28580903fb:35989-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/accf212f08b962d4f9ef633e1c33d603/.tmp/info/cdb73f989d2d42259da06ac148c9a567 is 1080, key is row0062/info:/1731457910759/Put/seqid=0 2024-11-13T00:32:02,944 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43865 is added to blk_1073741856_1032 (size=31106) 2024-11-13T00:32:02,944 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46003 is added to blk_1073741856_1032 (size=31106) 2024-11-13T00:32:02,950 DEBUG [RS:0;cb28580903fb:35989-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/accf212f08b962d4f9ef633e1c33d603/.tmp/info/cdb73f989d2d42259da06ac148c9a567 as hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/accf212f08b962d4f9ef633e1c33d603/info/cdb73f989d2d42259da06ac148c9a567 2024-11-13T00:32:02,955 INFO [RS:0;cb28580903fb:35989-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in accf212f08b962d4f9ef633e1c33d603/info of accf212f08b962d4f9ef633e1c33d603 into cdb73f989d2d42259da06ac148c9a567(size=30.4 K), total size for store is 30.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-13T00:32:02,956 DEBUG [RS:0;cb28580903fb:35989-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for accf212f08b962d4f9ef633e1c33d603: 2024-11-13T00:32:02,956 INFO [RS:0;cb28580903fb:35989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731457910825.accf212f08b962d4f9ef633e1c33d603., storeName=accf212f08b962d4f9ef633e1c33d603/info, priority=13, startTime=1731457922926; duration=0sec 2024-11-13T00:32:02,956 DEBUG [RS:0;cb28580903fb:35989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-13T00:32:02,956 DEBUG [RS:0;cb28580903fb:35989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: accf212f08b962d4f9ef633e1c33d603:info 2024-11-13T00:32:02,997 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,35015,1731457760593/cb28580903fb%2C35015%2C1731457760593.1731457760827 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:32:02,997 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,41233,1731457759323/cb28580903fb%2C41233%2C1731457759323.meta.1731457760430.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:32:03,731 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/MasterData/WALs/cb28580903fb,32917,1731457759225/cb28580903fb%2C32917%2C1731457759225.1731457759520 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:32:03,997 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,41233,1731457759323/cb28580903fb%2C41233%2C1731457759323.meta.1731457760430.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:32:03,997 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,35015,1731457760593/cb28580903fb%2C35015%2C1731457760593.1731457760827 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:32:04,732 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/MasterData/WALs/cb28580903fb,32917,1731457759225/cb28580903fb%2C32917%2C1731457759225.1731457759520 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-13T00:32:04,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35989 {}] regionserver.HRegion(8855): Flush requested on accf212f08b962d4f9ef633e1c33d603 2024-11-13T00:32:04,927 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing accf212f08b962d4f9ef633e1c33d603 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB 2024-11-13T00:32:04,932 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/accf212f08b962d4f9ef633e1c33d603/.tmp/info/faa134f6aec54d2ca00ff64016747b36 is 1080, key is row0086/info:/1731457922906/Put/seqid=0 2024-11-13T00:32:04,938 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43865 is added to blk_1073741857_1033 (size=17896) 2024-11-13T00:32:04,938 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46003 is added to blk_1073741857_1033 (size=17896) 2024-11-13T00:32:04,939 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=132 (bloomFilter=true), to=hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/accf212f08b962d4f9ef633e1c33d603/.tmp/info/faa134f6aec54d2ca00ff64016747b36 2024-11-13T00:32:04,945 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/accf212f08b962d4f9ef633e1c33d603/.tmp/info/faa134f6aec54d2ca00ff64016747b36 as hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/accf212f08b962d4f9ef633e1c33d603/info/faa134f6aec54d2ca00ff64016747b36 2024-11-13T00:32:04,950 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/accf212f08b962d4f9ef633e1c33d603/info/faa134f6aec54d2ca00ff64016747b36, entries=12, sequenceid=132, filesize=17.5 K 2024-11-13T00:32:04,951 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=13.66 KB/13988 for accf212f08b962d4f9ef633e1c33d603 in 24ms, sequenceid=132, compaction requested=false 2024-11-13T00:32:04,951 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for accf212f08b962d4f9ef633e1c33d603: 2024-11-13T00:32:04,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35989 {}] regionserver.HRegion(8855): Flush requested on accf212f08b962d4f9ef633e1c33d603 2024-11-13T00:32:04,952 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing accf212f08b962d4f9ef633e1c33d603 1/1 column families, dataSize=14.71 KB heapSize=16 KB 2024-11-13T00:32:04,956 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/accf212f08b962d4f9ef633e1c33d603/.tmp/info/d54d34ace7e546eb8f4b4704b779a7da is 1080, key is row0098/info:/1731457924928/Put/seqid=0 2024-11-13T00:32:04,963 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46003 is 
added to blk_1073741858_1034 (size=20078) 2024-11-13T00:32:04,964 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43865 is added to blk_1073741858_1034 (size=20078) 2024-11-13T00:32:04,965 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=14.71 KB at sequenceid=149 (bloomFilter=true), to=hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/accf212f08b962d4f9ef633e1c33d603/.tmp/info/d54d34ace7e546eb8f4b4704b779a7da 2024-11-13T00:32:04,970 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/accf212f08b962d4f9ef633e1c33d603/.tmp/info/d54d34ace7e546eb8f4b4704b779a7da as hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/accf212f08b962d4f9ef633e1c33d603/info/d54d34ace7e546eb8f4b4704b779a7da 2024-11-13T00:32:04,975 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/accf212f08b962d4f9ef633e1c33d603/info/d54d34ace7e546eb8f4b4704b779a7da, entries=14, sequenceid=149, filesize=19.6 K 2024-11-13T00:32:04,976 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~14.71 KB/15064, heapSize ~15.98 KB/16368, currentSize=14.71 KB/15064 for accf212f08b962d4f9ef633e1c33d603 in 24ms, sequenceid=149, compaction requested=true 2024-11-13T00:32:04,976 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for accf212f08b962d4f9ef633e1c33d603: 2024-11-13T00:32:04,976 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store accf212f08b962d4f9ef633e1c33d603:info, priority=-2147483648, current under compaction store size is 1 2024-11-13T00:32:04,976 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-13T00:32:04,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35989 {}] regionserver.HRegion(8855): Flush requested on accf212f08b962d4f9ef633e1c33d603 2024-11-13T00:32:04,976 DEBUG [RS:0;cb28580903fb:35989-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-13T00:32:04,976 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing accf212f08b962d4f9ef633e1c33d603 1/1 column families, dataSize=15.76 KB heapSize=17.13 KB 2024-11-13T00:32:04,977 DEBUG [RS:0;cb28580903fb:35989-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 69080 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-13T00:32:04,977 DEBUG [RS:0;cb28580903fb:35989-shortCompactions-0 {}] regionserver.HStore(1541): accf212f08b962d4f9ef633e1c33d603/info is initiating minor compaction (all files) 2024-11-13T00:32:04,977 INFO [RS:0;cb28580903fb:35989-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of accf212f08b962d4f9ef633e1c33d603/info in TestLogRolling-testLogRolling,row0062,1731457910825.accf212f08b962d4f9ef633e1c33d603. 
2024-11-13T00:32:04,977 INFO [RS:0;cb28580903fb:35989-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/accf212f08b962d4f9ef633e1c33d603/info/cdb73f989d2d42259da06ac148c9a567, hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/accf212f08b962d4f9ef633e1c33d603/info/faa134f6aec54d2ca00ff64016747b36, hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/accf212f08b962d4f9ef633e1c33d603/info/d54d34ace7e546eb8f4b4704b779a7da] into tmpdir=hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/accf212f08b962d4f9ef633e1c33d603/.tmp, totalSize=67.5 K 2024-11-13T00:32:04,978 DEBUG [RS:0;cb28580903fb:35989-shortCompactions-0 {}] compactions.Compactor(225): Compacting cdb73f989d2d42259da06ac148c9a567, keycount=24, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=116, earliestPutTs=1731457910759 2024-11-13T00:32:04,978 DEBUG [RS:0;cb28580903fb:35989-shortCompactions-0 {}] compactions.Compactor(225): Compacting faa134f6aec54d2ca00ff64016747b36, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=132, earliestPutTs=1731457922906 2024-11-13T00:32:04,979 DEBUG [RS:0;cb28580903fb:35989-shortCompactions-0 {}] compactions.Compactor(225): Compacting d54d34ace7e546eb8f4b4704b779a7da, keycount=14, bloomtype=ROW, size=19.6 K, encoding=NONE, compression=NONE, seqNum=149, earliestPutTs=1731457924928 2024-11-13T00:32:04,980 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/accf212f08b962d4f9ef633e1c33d603/.tmp/info/7e82f10bf4a54ae2b092667817bafc48 is 1080, key is row0112/info:/1731457924953/Put/seqid=0 2024-11-13T00:32:04,984 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46003 is added to blk_1073741859_1035 (size=21156) 2024-11-13T00:32:04,985 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43865 is added to blk_1073741859_1035 (size=21156) 2024-11-13T00:32:04,985 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.76 KB at sequenceid=167 (bloomFilter=true), to=hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/accf212f08b962d4f9ef633e1c33d603/.tmp/info/7e82f10bf4a54ae2b092667817bafc48 2024-11-13T00:32:04,988 INFO [RS:0;cb28580903fb:35989-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): accf212f08b962d4f9ef633e1c33d603#info#compaction#76 average throughput is 51.31 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-13T00:32:04,989 DEBUG [RS:0;cb28580903fb:35989-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/accf212f08b962d4f9ef633e1c33d603/.tmp/info/1b6d4f2231ae44b8a3c0d4e5608606a0 is 1080, key is row0062/info:/1731457910759/Put/seqid=0 2024-11-13T00:32:04,990 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/accf212f08b962d4f9ef633e1c33d603/.tmp/info/7e82f10bf4a54ae2b092667817bafc48 as hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/accf212f08b962d4f9ef633e1c33d603/info/7e82f10bf4a54ae2b092667817bafc48 2024-11-13T00:32:04,993 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46003 is added to blk_1073741860_1036 (size=59266) 2024-11-13T00:32:04,993 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43865 is added to blk_1073741860_1036 (size=59266) 2024-11-13T00:32:04,994 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/accf212f08b962d4f9ef633e1c33d603/info/7e82f10bf4a54ae2b092667817bafc48, entries=15, sequenceid=167, filesize=20.7 K 2024-11-13T00:32:04,995 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~15.76 KB/16140, heapSize ~17.11 KB/17520, currentSize=2.10 KB/2152 for accf212f08b962d4f9ef633e1c33d603 in 19ms, sequenceid=167, compaction requested=false 2024-11-13T00:32:04,996 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for accf212f08b962d4f9ef633e1c33d603: 2024-11-13T00:32:04,998 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,41233,1731457759323/cb28580903fb%2C41233%2C1731457759323.meta.1731457760430.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:32:04,998 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,35015,1731457760593/cb28580903fb%2C35015%2C1731457760593.1731457760827 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-13T00:32:04,999 DEBUG [RS:0;cb28580903fb:35989-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/accf212f08b962d4f9ef633e1c33d603/.tmp/info/1b6d4f2231ae44b8a3c0d4e5608606a0 as hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/accf212f08b962d4f9ef633e1c33d603/info/1b6d4f2231ae44b8a3c0d4e5608606a0 2024-11-13T00:32:05,003 INFO [RS:0;cb28580903fb:35989-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in accf212f08b962d4f9ef633e1c33d603/info of accf212f08b962d4f9ef633e1c33d603 into 1b6d4f2231ae44b8a3c0d4e5608606a0(size=57.9 K), total size for store is 78.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-13T00:32:05,003 DEBUG [RS:0;cb28580903fb:35989-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for accf212f08b962d4f9ef633e1c33d603: 2024-11-13T00:32:05,004 INFO [RS:0;cb28580903fb:35989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731457910825.accf212f08b962d4f9ef633e1c33d603., storeName=accf212f08b962d4f9ef633e1c33d603/info, priority=13, startTime=1731457924976; duration=0sec 2024-11-13T00:32:05,004 DEBUG [RS:0;cb28580903fb:35989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-13T00:32:05,004 DEBUG [RS:0;cb28580903fb:35989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: accf212f08b962d4f9ef633e1c33d603:info 2024-11-13T00:32:05,732 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/MasterData/WALs/cb28580903fb,32917,1731457759225/cb28580903fb%2C32917%2C1731457759225.1731457759520 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:32:05,998 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,41233,1731457759323/cb28580903fb%2C41233%2C1731457759323.meta.1731457760430.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:32:05,998 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,35015,1731457760593/cb28580903fb%2C35015%2C1731457760593.1731457760827 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:32:06,733 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/MasterData/WALs/cb28580903fb,32917,1731457759225/cb28580903fb%2C32917%2C1731457759225.1731457759520 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:32:06,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35989 {}] regionserver.HRegion(8855): Flush requested on accf212f08b962d4f9ef633e1c33d603 2024-11-13T00:32:06,987 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing accf212f08b962d4f9ef633e1c33d603 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-13T00:32:06,991 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/accf212f08b962d4f9ef633e1c33d603/.tmp/info/147c225260d74683bf31e2d9254b97e7 is 1080, key is row0127/info:/1731457924977/Put/seqid=0 2024-11-13T00:32:06,999 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43865 is added to blk_1073741861_1037 (size=12516) 2024-11-13T00:32:06,999 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46003 is added to blk_1073741861_1037 (size=12516) 2024-11-13T00:32:06,999 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,41233,1731457759323/cb28580903fb%2C41233%2C1731457759323.meta.1731457760430.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:32:06,999 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,35015,1731457760593/cb28580903fb%2C35015%2C1731457760593.1731457760827 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more
2024-11-13T00:32:07,000 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=178 (bloomFilter=true), to=hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/accf212f08b962d4f9ef633e1c33d603/.tmp/info/147c225260d74683bf31e2d9254b97e7
2024-11-13T00:32:07,005 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/accf212f08b962d4f9ef633e1c33d603/.tmp/info/147c225260d74683bf31e2d9254b97e7 as hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/accf212f08b962d4f9ef633e1c33d603/info/147c225260d74683bf31e2d9254b97e7
2024-11-13T00:32:07,009 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/accf212f08b962d4f9ef633e1c33d603/info/147c225260d74683bf31e2d9254b97e7, entries=7, sequenceid=178, filesize=12.2 K
2024-11-13T00:32:07,010 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=14.71 KB/15064 for accf212f08b962d4f9ef633e1c33d603 in 23ms, sequenceid=178, compaction requested=true
2024-11-13T00:32:07,010 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for accf212f08b962d4f9ef633e1c33d603:
2024-11-13T00:32:07,011 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store accf212f08b962d4f9ef633e1c33d603:info, priority=-2147483648, current under compaction store size is 1
2024-11-13T00:32:07,011 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-13T00:32:07,011 DEBUG [RS:0;cb28580903fb:35989-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-11-13T00:32:07,012 DEBUG [RS:0;cb28580903fb:35989-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 92938 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-11-13T00:32:07,012 DEBUG [RS:0;cb28580903fb:35989-shortCompactions-0 {}] regionserver.HStore(1541): accf212f08b962d4f9ef633e1c33d603/info is initiating minor compaction (all files)
2024-11-13T00:32:07,012 INFO [RS:0;cb28580903fb:35989-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of accf212f08b962d4f9ef633e1c33d603/info in TestLogRolling-testLogRolling,row0062,1731457910825.accf212f08b962d4f9ef633e1c33d603.
2024-11-13T00:32:07,012 INFO [RS:0;cb28580903fb:35989-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/accf212f08b962d4f9ef633e1c33d603/info/1b6d4f2231ae44b8a3c0d4e5608606a0, hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/accf212f08b962d4f9ef633e1c33d603/info/7e82f10bf4a54ae2b092667817bafc48, hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/accf212f08b962d4f9ef633e1c33d603/info/147c225260d74683bf31e2d9254b97e7] into tmpdir=hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/accf212f08b962d4f9ef633e1c33d603/.tmp, totalSize=90.8 K
2024-11-13T00:32:07,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35989 {}] regionserver.HRegion(8855): Flush requested on accf212f08b962d4f9ef633e1c33d603
2024-11-13T00:32:07,012 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing accf212f08b962d4f9ef633e1c33d603 1/1 column families, dataSize=16.81 KB heapSize=18.25 KB
2024-11-13T00:32:07,012 DEBUG [RS:0;cb28580903fb:35989-shortCompactions-0 {}] compactions.Compactor(225): Compacting 1b6d4f2231ae44b8a3c0d4e5608606a0, keycount=50, bloomtype=ROW, size=57.9 K, encoding=NONE, compression=NONE, seqNum=149, earliestPutTs=1731457910759
2024-11-13T00:32:07,013 DEBUG [RS:0;cb28580903fb:35989-shortCompactions-0 {}] compactions.Compactor(225): Compacting 7e82f10bf4a54ae2b092667817bafc48, keycount=15, bloomtype=ROW, size=20.7 K, encoding=NONE, compression=NONE, seqNum=167, earliestPutTs=1731457924953
2024-11-13T00:32:07,013 DEBUG [RS:0;cb28580903fb:35989-shortCompactions-0 {}] compactions.Compactor(225): Compacting 147c225260d74683bf31e2d9254b97e7, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=178, earliestPutTs=1731457924977
2024-11-13T00:32:07,016 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/accf212f08b962d4f9ef633e1c33d603/.tmp/info/1a09ecb2c7dd4cddb5a200ad9386f332 is 1080, key is row0134/info:/1731457926988/Put/seqid=0
2024-11-13T00:32:07,022 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46003 is added to blk_1073741862_1038 (size=22238)
2024-11-13T00:32:07,022 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43865 is added to blk_1073741862_1038 (size=22238)
2024-11-13T00:32:07,022 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=16.81 KB at sequenceid=197 (bloomFilter=true), to=hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/accf212f08b962d4f9ef633e1c33d603/.tmp/info/1a09ecb2c7dd4cddb5a200ad9386f332
2024-11-13T00:32:07,025 INFO [RS:0;cb28580903fb:35989-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): accf212f08b962d4f9ef633e1c33d603#info#compaction#79 average throughput is 36.94 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-11-13T00:32:07,026 DEBUG [RS:0;cb28580903fb:35989-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/accf212f08b962d4f9ef633e1c33d603/.tmp/info/d4ed370813c44a388e863ac33bd51ee4 is 1080, key is row0062/info:/1731457910759/Put/seqid=0
2024-11-13T00:32:07,028 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/accf212f08b962d4f9ef633e1c33d603/.tmp/info/1a09ecb2c7dd4cddb5a200ad9386f332 as hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/accf212f08b962d4f9ef633e1c33d603/info/1a09ecb2c7dd4cddb5a200ad9386f332
2024-11-13T00:32:07,030 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43865 is added to blk_1073741863_1039 (size=83221)
2024-11-13T00:32:07,030 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46003 is added to blk_1073741863_1039 (size=83221)
2024-11-13T00:32:07,034 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/accf212f08b962d4f9ef633e1c33d603/info/1a09ecb2c7dd4cddb5a200ad9386f332, entries=16, sequenceid=197, filesize=21.7 K
2024-11-13T00:32:07,035 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~16.81 KB/17216, heapSize ~18.23 KB/18672, currentSize=11.56 KB/11836 for accf212f08b962d4f9ef633e1c33d603 in 23ms, sequenceid=197, compaction requested=false
2024-11-13T00:32:07,035 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for accf212f08b962d4f9ef633e1c33d603:
2024-11-13T00:32:07,036 DEBUG [RS:0;cb28580903fb:35989-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/accf212f08b962d4f9ef633e1c33d603/.tmp/info/d4ed370813c44a388e863ac33bd51ee4 as hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/accf212f08b962d4f9ef633e1c33d603/info/d4ed370813c44a388e863ac33bd51ee4
2024-11-13T00:32:07,042 INFO [RS:0;cb28580903fb:35989-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in accf212f08b962d4f9ef633e1c33d603/info of accf212f08b962d4f9ef633e1c33d603 into d4ed370813c44a388e863ac33bd51ee4(size=81.3 K), total size for store is 103.0 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-11-13T00:32:07,042 DEBUG [RS:0;cb28580903fb:35989-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for accf212f08b962d4f9ef633e1c33d603:
2024-11-13T00:32:07,042 INFO [RS:0;cb28580903fb:35989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731457910825.accf212f08b962d4f9ef633e1c33d603., storeName=accf212f08b962d4f9ef633e1c33d603/info, priority=13, startTime=1731457927010; duration=0sec
2024-11-13T00:32:07,042 DEBUG [RS:0;cb28580903fb:35989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-13T00:32:07,042 DEBUG [RS:0;cb28580903fb:35989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: accf212f08b962d4f9ef633e1c33d603:info
2024-11-13T00:32:07,649 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; it might be because your Hadoop version is > 3.2.3 or 3.3.4. See HBASE-27595 for details.
2024-11-13T00:32:07,733 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/MasterData/WALs/cb28580903fb,32917,1731457759225/cb28580903fb%2C32917%2C1731457759225.1731457759520
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ...
11 more 2024-11-13T00:32:08,000 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,41233,1731457759323/cb28580903fb%2C41233%2C1731457759323.meta.1731457760430.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:32:08,000 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,35015,1731457760593/cb28580903fb%2C35015%2C1731457760593.1731457760827 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:32:08,734 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/MasterData/WALs/cb28580903fb,32917,1731457759225/cb28580903fb%2C32917%2C1731457759225.1731457759520 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-13T00:32:09,000 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,35015,1731457760593/cb28580903fb%2C35015%2C1731457760593.1731457760827 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:32:09,000 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,41233,1731457759323/cb28580903fb%2C41233%2C1731457759323.meta.1731457760430.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:32:09,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35989 {}] regionserver.HRegion(8855): Flush requested on accf212f08b962d4f9ef633e1c33d603 2024-11-13T00:32:09,042 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing accf212f08b962d4f9ef633e1c33d603 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB 2024-11-13T00:32:09,047 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/accf212f08b962d4f9ef633e1c33d603/.tmp/info/a1644a90af6c4991b1461d9216341c76 is 1080, key is row0150/info:/1731457927013/Put/seqid=0 2024-11-13T00:32:09,052 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46003 is added to blk_1073741864_1040 (size=17906) 2024-11-13T00:32:09,052 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43865 is added to blk_1073741864_1040 (size=17906) 2024-11-13T00:32:09,055 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=213 (bloomFilter=true), to=hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/accf212f08b962d4f9ef633e1c33d603/.tmp/info/a1644a90af6c4991b1461d9216341c76 2024-11-13T00:32:09,060 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/accf212f08b962d4f9ef633e1c33d603/.tmp/info/a1644a90af6c4991b1461d9216341c76 as hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/accf212f08b962d4f9ef633e1c33d603/info/a1644a90af6c4991b1461d9216341c76 2024-11-13T00:32:09,065 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/accf212f08b962d4f9ef633e1c33d603/info/a1644a90af6c4991b1461d9216341c76, entries=12, sequenceid=213, filesize=17.5 K 2024-11-13T00:32:09,066 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, 
currentSize=14.71 KB/15064 for accf212f08b962d4f9ef633e1c33d603 in 24ms, sequenceid=213, compaction requested=true 2024-11-13T00:32:09,066 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for accf212f08b962d4f9ef633e1c33d603: 2024-11-13T00:32:09,066 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store accf212f08b962d4f9ef633e1c33d603:info, priority=-2147483648, current under compaction store size is 1 2024-11-13T00:32:09,066 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-13T00:32:09,066 DEBUG [RS:0;cb28580903fb:35989-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-13T00:32:09,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35989 {}] regionserver.HRegion(8855): Flush requested on accf212f08b962d4f9ef633e1c33d603 2024-11-13T00:32:09,067 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing accf212f08b962d4f9ef633e1c33d603 1/1 column families, dataSize=15.76 KB heapSize=17.13 KB 2024-11-13T00:32:09,067 DEBUG [RS:0;cb28580903fb:35989-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 123365 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-13T00:32:09,067 DEBUG [RS:0;cb28580903fb:35989-shortCompactions-0 {}] regionserver.HStore(1541): accf212f08b962d4f9ef633e1c33d603/info is initiating minor compaction (all files) 2024-11-13T00:32:09,067 INFO [RS:0;cb28580903fb:35989-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of accf212f08b962d4f9ef633e1c33d603/info in TestLogRolling-testLogRolling,row0062,1731457910825.accf212f08b962d4f9ef633e1c33d603. 
2024-11-13T00:32:09,067 INFO [RS:0;cb28580903fb:35989-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/accf212f08b962d4f9ef633e1c33d603/info/d4ed370813c44a388e863ac33bd51ee4, hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/accf212f08b962d4f9ef633e1c33d603/info/1a09ecb2c7dd4cddb5a200ad9386f332, hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/accf212f08b962d4f9ef633e1c33d603/info/a1644a90af6c4991b1461d9216341c76] into tmpdir=hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/accf212f08b962d4f9ef633e1c33d603/.tmp, totalSize=120.5 K 2024-11-13T00:32:09,067 DEBUG [RS:0;cb28580903fb:35989-shortCompactions-0 {}] compactions.Compactor(225): Compacting d4ed370813c44a388e863ac33bd51ee4, keycount=72, bloomtype=ROW, size=81.3 K, encoding=NONE, compression=NONE, seqNum=178, earliestPutTs=1731457910759 2024-11-13T00:32:09,068 DEBUG [RS:0;cb28580903fb:35989-shortCompactions-0 {}] compactions.Compactor(225): Compacting 1a09ecb2c7dd4cddb5a200ad9386f332, keycount=16, bloomtype=ROW, size=21.7 K, encoding=NONE, compression=NONE, seqNum=197, earliestPutTs=1731457926988 2024-11-13T00:32:09,068 DEBUG [RS:0;cb28580903fb:35989-shortCompactions-0 {}] compactions.Compactor(225): Compacting a1644a90af6c4991b1461d9216341c76, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=213, earliestPutTs=1731457927013 2024-11-13T00:32:09,071 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/accf212f08b962d4f9ef633e1c33d603/.tmp/info/035fd0d937c9461bb8444bbe9d641a71 is 1080, key is row0162/info:/1731457929044/Put/seqid=0 2024-11-13T00:32:09,077 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43865 is added to blk_1073741865_1041 (size=21156) 2024-11-13T00:32:09,078 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46003 is added to blk_1073741865_1041 (size=21156) 2024-11-13T00:32:09,078 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.76 KB at sequenceid=231 (bloomFilter=true), to=hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/accf212f08b962d4f9ef633e1c33d603/.tmp/info/035fd0d937c9461bb8444bbe9d641a71 2024-11-13T00:32:09,080 INFO [RS:0;cb28580903fb:35989-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): accf212f08b962d4f9ef633e1c33d603#info#compaction#82 average throughput is 34.21 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-13T00:32:09,081 DEBUG [RS:0;cb28580903fb:35989-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/accf212f08b962d4f9ef633e1c33d603/.tmp/info/e49fd42eff1e407baeb685ba172ee35b is 1080, key is row0062/info:/1731457910759/Put/seqid=0 2024-11-13T00:32:09,085 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/accf212f08b962d4f9ef633e1c33d603/.tmp/info/035fd0d937c9461bb8444bbe9d641a71 as hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/accf212f08b962d4f9ef633e1c33d603/info/035fd0d937c9461bb8444bbe9d641a71 2024-11-13T00:32:09,085 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46003 is added to blk_1073741866_1042 (size=113515) 2024-11-13T00:32:09,086 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43865 is added to blk_1073741866_1042 (size=113515) 2024-11-13T00:32:09,091 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/accf212f08b962d4f9ef633e1c33d603/info/035fd0d937c9461bb8444bbe9d641a71, entries=15, sequenceid=231, filesize=20.7 K 2024-11-13T00:32:09,092 DEBUG [RS:0;cb28580903fb:35989-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/accf212f08b962d4f9ef633e1c33d603/.tmp/info/e49fd42eff1e407baeb685ba172ee35b as hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/accf212f08b962d4f9ef633e1c33d603/info/e49fd42eff1e407baeb685ba172ee35b 2024-11-13T00:32:09,096 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~15.76 KB/16140, heapSize ~17.11 KB/17520, currentSize=13.66 KB/13988 for accf212f08b962d4f9ef633e1c33d603 in 29ms, sequenceid=231, compaction requested=false 2024-11-13T00:32:09,097 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for accf212f08b962d4f9ef633e1c33d603: 2024-11-13T00:32:09,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35989 {}] regionserver.HRegion(8855): Flush requested on accf212f08b962d4f9ef633e1c33d603 2024-11-13T00:32:09,097 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing accf212f08b962d4f9ef633e1c33d603 1/1 column families, dataSize=14.71 KB heapSize=16 KB 2024-11-13T00:32:09,098 INFO [RS:0;cb28580903fb:35989-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in accf212f08b962d4f9ef633e1c33d603/info of accf212f08b962d4f9ef633e1c33d603 into e49fd42eff1e407baeb685ba172ee35b(size=110.9 K), total size for store is 131.5 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-13T00:32:09,098 DEBUG [RS:0;cb28580903fb:35989-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for accf212f08b962d4f9ef633e1c33d603: 2024-11-13T00:32:09,098 INFO [RS:0;cb28580903fb:35989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731457910825.accf212f08b962d4f9ef633e1c33d603., storeName=accf212f08b962d4f9ef633e1c33d603/info, priority=13, startTime=1731457929066; duration=0sec 2024-11-13T00:32:09,098 DEBUG [RS:0;cb28580903fb:35989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-13T00:32:09,098 DEBUG [RS:0;cb28580903fb:35989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: accf212f08b962d4f9ef633e1c33d603:info 2024-11-13T00:32:09,113 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/accf212f08b962d4f9ef633e1c33d603/.tmp/info/663a9da9f767433bbc7b7a89db29bc02 is 1080, key is row0177/info:/1731457929068/Put/seqid=0 2024-11-13T00:32:09,118 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43865 is added to blk_1073741867_1043 (size=20078) 2024-11-13T00:32:09,118 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46003 is added to blk_1073741867_1043 (size=20078) 2024-11-13T00:32:09,118 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=14.71 KB at sequenceid=248 (bloomFilter=true), to=hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/accf212f08b962d4f9ef633e1c33d603/.tmp/info/663a9da9f767433bbc7b7a89db29bc02 2024-11-13T00:32:09,124 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/accf212f08b962d4f9ef633e1c33d603/.tmp/info/663a9da9f767433bbc7b7a89db29bc02 as hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/accf212f08b962d4f9ef633e1c33d603/info/663a9da9f767433bbc7b7a89db29bc02 2024-11-13T00:32:09,128 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/accf212f08b962d4f9ef633e1c33d603/info/663a9da9f767433bbc7b7a89db29bc02, entries=14, sequenceid=248, filesize=19.6 K 2024-11-13T00:32:09,129 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~14.71 KB/15064, heapSize ~15.98 KB/16368, currentSize=2.10 KB/2152 for accf212f08b962d4f9ef633e1c33d603 in 32ms, sequenceid=248, compaction requested=true 2024-11-13T00:32:09,129 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for accf212f08b962d4f9ef633e1c33d603: 2024-11-13T00:32:09,129 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store accf212f08b962d4f9ef633e1c33d603:info, priority=-2147483648, current under compaction store size is 1 2024-11-13T00:32:09,129 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-13T00:32:09,129 DEBUG [RS:0;cb28580903fb:35989-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-13T00:32:09,131 DEBUG [RS:0;cb28580903fb:35989-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 154749 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-13T00:32:09,131 DEBUG [RS:0;cb28580903fb:35989-shortCompactions-0 {}] regionserver.HStore(1541): accf212f08b962d4f9ef633e1c33d603/info is initiating minor compaction (all files) 2024-11-13T00:32:09,131 INFO [RS:0;cb28580903fb:35989-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of accf212f08b962d4f9ef633e1c33d603/info in TestLogRolling-testLogRolling,row0062,1731457910825.accf212f08b962d4f9ef633e1c33d603. 2024-11-13T00:32:09,131 INFO [RS:0;cb28580903fb:35989-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/accf212f08b962d4f9ef633e1c33d603/info/e49fd42eff1e407baeb685ba172ee35b, hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/accf212f08b962d4f9ef633e1c33d603/info/035fd0d937c9461bb8444bbe9d641a71, hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/accf212f08b962d4f9ef633e1c33d603/info/663a9da9f767433bbc7b7a89db29bc02] into tmpdir=hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/accf212f08b962d4f9ef633e1c33d603/.tmp, totalSize=151.1 K 2024-11-13T00:32:09,131 DEBUG [RS:0;cb28580903fb:35989-shortCompactions-0 {}] compactions.Compactor(225): Compacting e49fd42eff1e407baeb685ba172ee35b, keycount=100, bloomtype=ROW, size=110.9 K, encoding=NONE, compression=NONE, seqNum=213, earliestPutTs=1731457910759 2024-11-13T00:32:09,132 DEBUG [RS:0;cb28580903fb:35989-shortCompactions-0 {}] compactions.Compactor(225): Compacting 035fd0d937c9461bb8444bbe9d641a71, keycount=15, bloomtype=ROW, size=20.7 K, encoding=NONE, compression=NONE, seqNum=231, earliestPutTs=1731457929044 2024-11-13T00:32:09,132 DEBUG [RS:0;cb28580903fb:35989-shortCompactions-0 {}] compactions.Compactor(225): Compacting 663a9da9f767433bbc7b7a89db29bc02, keycount=14, bloomtype=ROW, size=19.6 K, encoding=NONE, compression=NONE, seqNum=248, earliestPutTs=1731457929068 2024-11-13T00:32:09,145 INFO [RS:0;cb28580903fb:35989-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): accf212f08b962d4f9ef633e1c33d603#info#compaction#84 average throughput is 66.19 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-13T00:32:09,145 DEBUG [RS:0;cb28580903fb:35989-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/accf212f08b962d4f9ef633e1c33d603/.tmp/info/bc831392a79f48adac1b9a59580cc988 is 1080, key is row0062/info:/1731457910759/Put/seqid=0 2024-11-13T00:32:09,148 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43865 is added to blk_1073741868_1044 (size=145084) 2024-11-13T00:32:09,148 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46003 is added to blk_1073741868_1044 (size=145084) 2024-11-13T00:32:09,153 DEBUG [RS:0;cb28580903fb:35989-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/accf212f08b962d4f9ef633e1c33d603/.tmp/info/bc831392a79f48adac1b9a59580cc988 as hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/accf212f08b962d4f9ef633e1c33d603/info/bc831392a79f48adac1b9a59580cc988 2024-11-13T00:32:09,159 INFO [RS:0;cb28580903fb:35989-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in accf212f08b962d4f9ef633e1c33d603/info of accf212f08b962d4f9ef633e1c33d603 into bc831392a79f48adac1b9a59580cc988(size=141.7 K), total size for store is 141.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-13T00:32:09,159 DEBUG [RS:0;cb28580903fb:35989-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for accf212f08b962d4f9ef633e1c33d603: 2024-11-13T00:32:09,159 INFO [RS:0;cb28580903fb:35989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731457910825.accf212f08b962d4f9ef633e1c33d603., storeName=accf212f08b962d4f9ef633e1c33d603/info, priority=13, startTime=1731457929129; duration=0sec 2024-11-13T00:32:09,159 DEBUG [RS:0;cb28580903fb:35989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-13T00:32:09,159 DEBUG [RS:0;cb28580903fb:35989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: accf212f08b962d4f9ef633e1c33d603:info 2024-11-13T00:32:09,734 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/MasterData/WALs/cb28580903fb,32917,1731457759225/cb28580903fb%2C32917%2C1731457759225.1731457759520 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:32:10,001 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,35015,1731457760593/cb28580903fb%2C35015%2C1731457760593.1731457760827 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:32:10,001 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,41233,1731457759323/cb28580903fb%2C41233%2C1731457759323.meta.1731457760430.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:32:10,735 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/MasterData/WALs/cb28580903fb,32917,1731457759225/cb28580903fb%2C32917%2C1731457759225.1731457759520 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:32:11,001 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,35015,1731457760593/cb28580903fb%2C35015%2C1731457760593.1731457760827 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:32:11,001 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,41233,1731457759323/cb28580903fb%2C41233%2C1731457759323.meta.1731457760430.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-13T00:32:11,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35989 {}] regionserver.HRegion(8855): Flush requested on accf212f08b962d4f9ef633e1c33d603 2024-11-13T00:32:11,121 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing accf212f08b962d4f9ef633e1c33d603 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-13T00:32:11,125 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/accf212f08b962d4f9ef633e1c33d603/.tmp/info/f0e041a4d7904c3f827a5a1218ba3768 is 1080, key is row0191/info:/1731457929098/Put/seqid=0 2024-11-13T00:32:11,134 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43865 is added to blk_1073741869_1045 (size=12520) 2024-11-13T00:32:11,134 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46003 is added to blk_1073741869_1045 (size=12520) 2024-11-13T00:32:11,135 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=260 (bloomFilter=true), to=hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/accf212f08b962d4f9ef633e1c33d603/.tmp/info/f0e041a4d7904c3f827a5a1218ba3768 2024-11-13T00:32:11,140 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/accf212f08b962d4f9ef633e1c33d603/.tmp/info/f0e041a4d7904c3f827a5a1218ba3768 as hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/accf212f08b962d4f9ef633e1c33d603/info/f0e041a4d7904c3f827a5a1218ba3768 2024-11-13T00:32:11,146 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/accf212f08b962d4f9ef633e1c33d603/info/f0e041a4d7904c3f827a5a1218ba3768, entries=7, sequenceid=260, filesize=12.2 K 2024-11-13T00:32:11,147 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=16.81 KB/17216 for accf212f08b962d4f9ef633e1c33d603 in 25ms, sequenceid=260, compaction requested=false 2024-11-13T00:32:11,147 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for accf212f08b962d4f9ef633e1c33d603: 2024-11-13T00:32:11,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35989 {}] regionserver.HRegion(8855): Flush requested on accf212f08b962d4f9ef633e1c33d603 2024-11-13T00:32:11,147 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing accf212f08b962d4f9ef633e1c33d603 1/1 column families, dataSize=17.86 KB heapSize=19.38 KB 2024-11-13T00:32:11,151 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/accf212f08b962d4f9ef633e1c33d603/.tmp/info/2262ec92aca4479db776fb55f85ccf32 is 1080, key is row0198/info:/1731457931122/Put/seqid=0 2024-11-13T00:32:11,161 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46003 is added to 
blk_1073741870_1046 (size=23333) 2024-11-13T00:32:11,162 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43865 is added to blk_1073741870_1046 (size=23333) 2024-11-13T00:32:11,162 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.86 KB at sequenceid=280 (bloomFilter=true), to=hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/accf212f08b962d4f9ef633e1c33d603/.tmp/info/2262ec92aca4479db776fb55f85ccf32 2024-11-13T00:32:11,167 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/accf212f08b962d4f9ef633e1c33d603/.tmp/info/2262ec92aca4479db776fb55f85ccf32 as hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/accf212f08b962d4f9ef633e1c33d603/info/2262ec92aca4479db776fb55f85ccf32 2024-11-13T00:32:11,172 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/accf212f08b962d4f9ef633e1c33d603/info/2262ec92aca4479db776fb55f85ccf32, entries=17, sequenceid=280, filesize=22.8 K 2024-11-13T00:32:11,173 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~17.86 KB/18292, heapSize ~19.36 KB/19824, currentSize=10.51 KB/10760 for accf212f08b962d4f9ef633e1c33d603 in 26ms, sequenceid=280, compaction requested=true 2024-11-13T00:32:11,173 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for accf212f08b962d4f9ef633e1c33d603: 2024-11-13T00:32:11,173 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store accf212f08b962d4f9ef633e1c33d603:info, priority=-2147483648, current under compaction store size is 1 2024-11-13T00:32:11,173 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-13T00:32:11,173 DEBUG [RS:0;cb28580903fb:35989-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-13T00:32:11,174 DEBUG [RS:0;cb28580903fb:35989-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 180937 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-13T00:32:11,174 DEBUG [RS:0;cb28580903fb:35989-shortCompactions-0 {}] regionserver.HStore(1541): accf212f08b962d4f9ef633e1c33d603/info is initiating minor compaction (all files) 2024-11-13T00:32:11,174 INFO [RS:0;cb28580903fb:35989-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of accf212f08b962d4f9ef633e1c33d603/info in TestLogRolling-testLogRolling,row0062,1731457910825.accf212f08b962d4f9ef633e1c33d603. 
2024-11-13T00:32:11,174 INFO [RS:0;cb28580903fb:35989-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/accf212f08b962d4f9ef633e1c33d603/info/bc831392a79f48adac1b9a59580cc988, hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/accf212f08b962d4f9ef633e1c33d603/info/f0e041a4d7904c3f827a5a1218ba3768, hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/accf212f08b962d4f9ef633e1c33d603/info/2262ec92aca4479db776fb55f85ccf32] into tmpdir=hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/accf212f08b962d4f9ef633e1c33d603/.tmp, totalSize=176.7 K 2024-11-13T00:32:11,174 DEBUG [RS:0;cb28580903fb:35989-shortCompactions-0 {}] compactions.Compactor(225): Compacting bc831392a79f48adac1b9a59580cc988, keycount=129, bloomtype=ROW, size=141.7 K, encoding=NONE, compression=NONE, seqNum=248, earliestPutTs=1731457910759 2024-11-13T00:32:11,175 DEBUG [RS:0;cb28580903fb:35989-shortCompactions-0 {}] compactions.Compactor(225): Compacting f0e041a4d7904c3f827a5a1218ba3768, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=260, earliestPutTs=1731457929098 2024-11-13T00:32:11,175 DEBUG [RS:0;cb28580903fb:35989-shortCompactions-0 {}] compactions.Compactor(225): Compacting 2262ec92aca4479db776fb55f85ccf32, keycount=17, bloomtype=ROW, size=22.8 K, encoding=NONE, compression=NONE, seqNum=280, earliestPutTs=1731457931122 2024-11-13T00:32:11,185 INFO [RS:0;cb28580903fb:35989-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): accf212f08b962d4f9ef633e1c33d603#info#compaction#87 average throughput is 52.33 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-13T00:32:11,186 DEBUG [RS:0;cb28580903fb:35989-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/accf212f08b962d4f9ef633e1c33d603/.tmp/info/9ced62f8baa740d996a3a39d27ddad1d is 1080, key is row0062/info:/1731457910759/Put/seqid=0 2024-11-13T00:32:11,188 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46003 is added to blk_1073741871_1047 (size=171087) 2024-11-13T00:32:11,188 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43865 is added to blk_1073741871_1047 (size=171087) 2024-11-13T00:32:11,193 DEBUG [RS:0;cb28580903fb:35989-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/accf212f08b962d4f9ef633e1c33d603/.tmp/info/9ced62f8baa740d996a3a39d27ddad1d as hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/accf212f08b962d4f9ef633e1c33d603/info/9ced62f8baa740d996a3a39d27ddad1d 2024-11-13T00:32:11,198 INFO [RS:0;cb28580903fb:35989-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in accf212f08b962d4f9ef633e1c33d603/info of accf212f08b962d4f9ef633e1c33d603 into 9ced62f8baa740d996a3a39d27ddad1d(size=167.1 K), total size for store is 167.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-13T00:32:11,198 DEBUG [RS:0;cb28580903fb:35989-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for accf212f08b962d4f9ef633e1c33d603: 2024-11-13T00:32:11,198 INFO [RS:0;cb28580903fb:35989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731457910825.accf212f08b962d4f9ef633e1c33d603., storeName=accf212f08b962d4f9ef633e1c33d603/info, priority=13, startTime=1731457931173; duration=0sec 2024-11-13T00:32:11,198 DEBUG [RS:0;cb28580903fb:35989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-13T00:32:11,198 DEBUG [RS:0;cb28580903fb:35989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: accf212f08b962d4f9ef633e1c33d603:info 2024-11-13T00:32:11,735 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/MasterData/WALs/cb28580903fb,32917,1731457759225/cb28580903fb%2C32917%2C1731457759225.1731457759520 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:32:12,002 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,35015,1731457760593/cb28580903fb%2C35015%2C1731457760593.1731457760827 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:32:12,002 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,41233,1731457759323/cb28580903fb%2C41233%2C1731457759323.meta.1731457760430.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:32:12,736 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/MasterData/WALs/cb28580903fb,32917,1731457759225/cb28580903fb%2C32917%2C1731457759225.1731457759520 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:32:13,002 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,35015,1731457760593/cb28580903fb%2C35015%2C1731457760593.1731457760827 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:32:13,002 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,41233,1731457759323/cb28580903fb%2C41233%2C1731457759323.meta.1731457760430.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-13T00:32:13,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35989 {}] regionserver.HRegion(8855): Flush requested on accf212f08b962d4f9ef633e1c33d603 2024-11-13T00:32:13,166 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing accf212f08b962d4f9ef633e1c33d603 1/1 column families, dataSize=11.56 KB heapSize=12.63 KB 2024-11-13T00:32:13,170 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/accf212f08b962d4f9ef633e1c33d603/.tmp/info/ba80950b83a2484b8dda8a665e809b33 is 1080, key is row0215/info:/1731457931149/Put/seqid=0 2024-11-13T00:32:13,175 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43865 is added to blk_1073741872_1048 (size=16839) 2024-11-13T00:32:13,175 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46003 is added to blk_1073741872_1048 (size=16839) 2024-11-13T00:32:13,175 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.56 KB at sequenceid=295 (bloomFilter=true), to=hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/accf212f08b962d4f9ef633e1c33d603/.tmp/info/ba80950b83a2484b8dda8a665e809b33 2024-11-13T00:32:13,181 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/accf212f08b962d4f9ef633e1c33d603/.tmp/info/ba80950b83a2484b8dda8a665e809b33 as hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/accf212f08b962d4f9ef633e1c33d603/info/ba80950b83a2484b8dda8a665e809b33 2024-11-13T00:32:13,186 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/accf212f08b962d4f9ef633e1c33d603/info/ba80950b83a2484b8dda8a665e809b33, entries=11, sequenceid=295, filesize=16.4 K 2024-11-13T00:32:13,187 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~11.56 KB/11836, heapSize ~12.61 KB/12912, currentSize=13.66 KB/13988 for accf212f08b962d4f9ef633e1c33d603 in 21ms, sequenceid=295, compaction requested=false 2024-11-13T00:32:13,187 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for accf212f08b962d4f9ef633e1c33d603: 2024-11-13T00:32:13,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35989 {}] regionserver.HRegion(8855): Flush requested on accf212f08b962d4f9ef633e1c33d603 2024-11-13T00:32:13,188 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing accf212f08b962d4f9ef633e1c33d603 1/1 column families, dataSize=14.71 KB heapSize=16 KB 2024-11-13T00:32:13,191 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/accf212f08b962d4f9ef633e1c33d603/.tmp/info/edb4dafa16c94c0f8fdb77f90382da45 is 1080, key is row0226/info:/1731457933167/Put/seqid=0 2024-11-13T00:32:13,196 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46003 is 
added to blk_1073741873_1049 (size=20092) 2024-11-13T00:32:13,196 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43865 is added to blk_1073741873_1049 (size=20092) 2024-11-13T00:32:13,197 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=14.71 KB at sequenceid=312 (bloomFilter=true), to=hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/accf212f08b962d4f9ef633e1c33d603/.tmp/info/edb4dafa16c94c0f8fdb77f90382da45 2024-11-13T00:32:13,202 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/accf212f08b962d4f9ef633e1c33d603/.tmp/info/edb4dafa16c94c0f8fdb77f90382da45 as hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/accf212f08b962d4f9ef633e1c33d603/info/edb4dafa16c94c0f8fdb77f90382da45 2024-11-13T00:32:13,206 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/accf212f08b962d4f9ef633e1c33d603/info/edb4dafa16c94c0f8fdb77f90382da45, entries=14, sequenceid=312, filesize=19.6 K 2024-11-13T00:32:13,207 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~14.71 KB/15064, heapSize ~15.98 KB/16368, currentSize=13.66 KB/13988 for accf212f08b962d4f9ef633e1c33d603 in 20ms, sequenceid=312, compaction requested=true 2024-11-13T00:32:13,207 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for accf212f08b962d4f9ef633e1c33d603: 2024-11-13T00:32:13,207 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store accf212f08b962d4f9ef633e1c33d603:info, priority=-2147483648, current under compaction store size is 1 2024-11-13T00:32:13,207 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-13T00:32:13,207 DEBUG [RS:0;cb28580903fb:35989-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-13T00:32:13,208 DEBUG [RS:0;cb28580903fb:35989-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 208018 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-13T00:32:13,208 DEBUG [RS:0;cb28580903fb:35989-shortCompactions-0 {}] regionserver.HStore(1541): accf212f08b962d4f9ef633e1c33d603/info is initiating minor compaction (all files) 2024-11-13T00:32:13,208 INFO [RS:0;cb28580903fb:35989-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of accf212f08b962d4f9ef633e1c33d603/info in TestLogRolling-testLogRolling,row0062,1731457910825.accf212f08b962d4f9ef633e1c33d603. 
2024-11-13T00:32:13,208 INFO [RS:0;cb28580903fb:35989-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/accf212f08b962d4f9ef633e1c33d603/info/9ced62f8baa740d996a3a39d27ddad1d, hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/accf212f08b962d4f9ef633e1c33d603/info/ba80950b83a2484b8dda8a665e809b33, hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/accf212f08b962d4f9ef633e1c33d603/info/edb4dafa16c94c0f8fdb77f90382da45] into tmpdir=hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/accf212f08b962d4f9ef633e1c33d603/.tmp, totalSize=203.1 K 2024-11-13T00:32:13,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35989 {}] regionserver.HRegion(8855): Flush requested on accf212f08b962d4f9ef633e1c33d603 2024-11-13T00:32:13,208 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing accf212f08b962d4f9ef633e1c33d603 1/1 column families, dataSize=14.71 KB heapSize=16 KB 2024-11-13T00:32:13,209 DEBUG [RS:0;cb28580903fb:35989-shortCompactions-0 {}] compactions.Compactor(225): Compacting 9ced62f8baa740d996a3a39d27ddad1d, keycount=153, bloomtype=ROW, size=167.1 K, encoding=NONE, compression=NONE, seqNum=280, earliestPutTs=1731457910759 2024-11-13T00:32:13,209 DEBUG [RS:0;cb28580903fb:35989-shortCompactions-0 {}] compactions.Compactor(225): Compacting ba80950b83a2484b8dda8a665e809b33, keycount=11, bloomtype=ROW, size=16.4 K, encoding=NONE, compression=NONE, seqNum=295, earliestPutTs=1731457931149 2024-11-13T00:32:13,209 DEBUG [RS:0;cb28580903fb:35989-shortCompactions-0 {}] compactions.Compactor(225): Compacting edb4dafa16c94c0f8fdb77f90382da45, keycount=14, bloomtype=ROW, size=19.6 K, encoding=NONE, compression=NONE, seqNum=312, earliestPutTs=1731457933167 2024-11-13T00:32:13,212 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/accf212f08b962d4f9ef633e1c33d603/.tmp/info/f218764ee26f415cad7bc602d60c5246 is 1080, key is row0240/info:/1731457933188/Put/seqid=0 2024-11-13T00:32:13,221 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43865 is added to blk_1073741874_1050 (size=20092) 2024-11-13T00:32:13,221 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46003 is added to blk_1073741874_1050 (size=20092) 2024-11-13T00:32:13,221 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=14.71 KB at sequenceid=329 (bloomFilter=true), to=hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/accf212f08b962d4f9ef633e1c33d603/.tmp/info/f218764ee26f415cad7bc602d60c5246 2024-11-13T00:32:13,223 INFO [RS:0;cb28580903fb:35989-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): accf212f08b962d4f9ef633e1c33d603#info#compaction#91 average throughput is 60.89 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-13T00:32:13,223 DEBUG [RS:0;cb28580903fb:35989-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/accf212f08b962d4f9ef633e1c33d603/.tmp/info/8f173b3ee481424db5142ea4de65a370 is 1080, key is row0062/info:/1731457910759/Put/seqid=0 2024-11-13T00:32:13,226 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46003 is added to blk_1073741875_1051 (size=198168) 2024-11-13T00:32:13,227 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43865 is added to blk_1073741875_1051 (size=198168) 2024-11-13T00:32:13,227 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/accf212f08b962d4f9ef633e1c33d603/.tmp/info/f218764ee26f415cad7bc602d60c5246 as hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/accf212f08b962d4f9ef633e1c33d603/info/f218764ee26f415cad7bc602d60c5246 2024-11-13T00:32:13,232 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/accf212f08b962d4f9ef633e1c33d603/info/f218764ee26f415cad7bc602d60c5246, entries=14, sequenceid=329, filesize=19.6 K 2024-11-13T00:32:13,232 DEBUG [RS:0;cb28580903fb:35989-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/accf212f08b962d4f9ef633e1c33d603/.tmp/info/8f173b3ee481424db5142ea4de65a370 as hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/accf212f08b962d4f9ef633e1c33d603/info/8f173b3ee481424db5142ea4de65a370 2024-11-13T00:32:13,233 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~14.71 KB/15064, heapSize ~15.98 KB/16368, currentSize=3.15 KB/3228 for accf212f08b962d4f9ef633e1c33d603 in 25ms, sequenceid=329, compaction requested=false 2024-11-13T00:32:13,233 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for accf212f08b962d4f9ef633e1c33d603: 2024-11-13T00:32:13,238 INFO [RS:0;cb28580903fb:35989-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in accf212f08b962d4f9ef633e1c33d603/info of accf212f08b962d4f9ef633e1c33d603 into 8f173b3ee481424db5142ea4de65a370(size=193.5 K), total size for store is 213.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-13T00:32:13,238 DEBUG [RS:0;cb28580903fb:35989-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for accf212f08b962d4f9ef633e1c33d603: 2024-11-13T00:32:13,238 INFO [RS:0;cb28580903fb:35989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731457910825.accf212f08b962d4f9ef633e1c33d603., storeName=accf212f08b962d4f9ef633e1c33d603/info, priority=13, startTime=1731457933207; duration=0sec 2024-11-13T00:32:13,238 DEBUG [RS:0;cb28580903fb:35989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-13T00:32:13,238 DEBUG [RS:0;cb28580903fb:35989-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: accf212f08b962d4f9ef633e1c33d603:info 2024-11-13T00:32:13,736 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/MasterData/WALs/cb28580903fb,32917,1731457759225/cb28580903fb%2C32917%2C1731457759225.1731457759520 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-13T00:32:14,003 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,35015,1731457760593/cb28580903fb%2C35015%2C1731457760593.1731457760827 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:32:14,003 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,41233,1731457759323/cb28580903fb%2C41233%2C1731457759323.meta.1731457760430.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:32:14,737 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/MasterData/WALs/cb28580903fb,32917,1731457759225/cb28580903fb%2C32917%2C1731457759225.1731457759520 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-13T00:32:15,003 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,41233,1731457759323/cb28580903fb%2C41233%2C1731457759323.meta.1731457760430.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:32:15,003 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,35015,1731457760593/cb28580903fb%2C35015%2C1731457760593.1731457760827 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:32:15,214 INFO [Time-limited test {}] wal.AbstractTestLogRolling(285): after writing there are 0 log files 2024-11-13T00:32:15,214 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor cb28580903fb%2C35989%2C1731457897712.1731457935214 2024-11-13T00:32:15,219 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:32:15,219 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:32:15,219 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:32:15,219 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:32:15,220 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:32:15,220 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/WALs/cb28580903fb,35989,1731457897712/cb28580903fb%2C35989%2C1731457897712.1731457898089 with entries=313, filesize=308.60 KB; new WAL /user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/WALs/cb28580903fb,35989,1731457897712/cb28580903fb%2C35989%2C1731457897712.1731457935214 2024-11-13T00:32:15,221 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38763:38763),(127.0.0.1/127.0.0.1:37755:37755)] 2024-11-13T00:32:15,221 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/WALs/cb28580903fb,35989,1731457897712/cb28580903fb%2C35989%2C1731457897712.1731457898089 is not closed yet, will try archiving it next time 2024-11-13T00:32:15,221 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46003 is added to blk_1073741833_1009 (size=316019) 2024-11-13T00:32:15,222 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43865 is added to blk_1073741833_1009 (size=316019) 2024-11-13T00:32:15,225 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing accf212f08b962d4f9ef633e1c33d603 1/1 column families, dataSize=3.15 KB heapSize=3.63 KB 2024-11-13T00:32:15,228 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/accf212f08b962d4f9ef633e1c33d603/.tmp/info/3708a831d5584a999388ed9fe1f4d297 is 1080, key is row0254/info:/1731457933209/Put/seqid=0 2024-11-13T00:32:15,232 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46003 is added to blk_1073741877_1053 (size=8199) 2024-11-13T00:32:15,233 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43865 is added to blk_1073741877_1053 (size=8199) 2024-11-13T00:32:15,233 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.15 KB at sequenceid=336 (bloomFilter=true), to=hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/accf212f08b962d4f9ef633e1c33d603/.tmp/info/3708a831d5584a999388ed9fe1f4d297 2024-11-13T00:32:15,238 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/accf212f08b962d4f9ef633e1c33d603/.tmp/info/3708a831d5584a999388ed9fe1f4d297 as hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/accf212f08b962d4f9ef633e1c33d603/info/3708a831d5584a999388ed9fe1f4d297 2024-11-13T00:32:15,242 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/accf212f08b962d4f9ef633e1c33d603/info/3708a831d5584a999388ed9fe1f4d297, entries=3, sequenceid=336, filesize=8.0 K 2024-11-13T00:32:15,242 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~3.15 KB/3228, heapSize ~3.61 KB/3696, currentSize=0 B/0 for accf212f08b962d4f9ef633e1c33d603 in 17ms, sequenceid=336, compaction requested=true 2024-11-13T00:32:15,243 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for accf212f08b962d4f9ef633e1c33d603: 2024-11-13T00:32:15,243 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 7b90f83fab0270206e7a13b40fbba0e2: 2024-11-13T00:32:15,243 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=670 B heapSize=2.02 KB 2024-11-13T00:32:15,246 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/hbase/meta/1588230740/.tmp/info/6fd002bdda764acd87cd7e76692f811e is 186, key is TestLogRolling-testLogRolling,,1731457910825.7b90f83fab0270206e7a13b40fbba0e2./info:regioninfo/1731457911495/Put/seqid=0 2024-11-13T00:32:15,252 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46003 is added to blk_1073741878_1054 (size=6153) 2024-11-13T00:32:15,252 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43865 is added to blk_1073741878_1054 (size=6153) 2024-11-13T00:32:15,252 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=670 B at sequenceid=21 (bloomFilter=true), 
to=hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/hbase/meta/1588230740/.tmp/info/6fd002bdda764acd87cd7e76692f811e 2024-11-13T00:32:15,257 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/hbase/meta/1588230740/.tmp/info/6fd002bdda764acd87cd7e76692f811e as hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/hbase/meta/1588230740/info/6fd002bdda764acd87cd7e76692f811e 2024-11-13T00:32:15,261 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/hbase/meta/1588230740/info/6fd002bdda764acd87cd7e76692f811e, entries=5, sequenceid=21, filesize=6.0 K 2024-11-13T00:32:15,262 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~670 B/670, heapSize ~1.25 KB/1280, currentSize=0 B/0 for 1588230740 in 19ms, sequenceid=21, compaction requested=false 2024-11-13T00:32:15,262 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-11-13T00:32:15,262 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor cb28580903fb%2C35989%2C1731457897712.1731457935262 2024-11-13T00:32:15,266 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:32:15,266 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:32:15,266 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:32:15,266 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:32:15,266 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:32:15,266 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/WALs/cb28580903fb,35989,1731457897712/cb28580903fb%2C35989%2C1731457897712.1731457935214 with entries=2, filesize=723 B; new WAL /user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/WALs/cb28580903fb,35989,1731457897712/cb28580903fb%2C35989%2C1731457897712.1731457935262 2024-11-13T00:32:15,267 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37755:37755),(127.0.0.1/127.0.0.1:38763:38763)] 2024-11-13T00:32:15,267 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/WALs/cb28580903fb,35989,1731457897712/cb28580903fb%2C35989%2C1731457897712.1731457935214 is not closed yet, will try archiving it next time 2024-11-13T00:32:15,267 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/WALs/cb28580903fb,35989,1731457897712/cb28580903fb%2C35989%2C1731457897712.1731457898089 to hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/oldWALs/cb28580903fb%2C35989%2C1731457897712.1731457898089 2024-11-13T00:32:15,268 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [5,000] milli-secs(wait.for.ratio=[1]) 2024-11-13T00:32:15,268 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46003 is added to blk_1073741876_1052 (size=731) 2024-11-13T00:32:15,268 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43865 is added to blk_1073741876_1052 (size=731) 
2024-11-13T00:32:15,269 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/WALs/cb28580903fb,35989,1731457897712/cb28580903fb%2C35989%2C1731457897712.1731457935214 to hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/oldWALs/cb28580903fb%2C35989%2C1731457897712.1731457935214 2024-11-13T00:32:15,368 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-13T00:32:15,369 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-13T00:32:15,369 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at 
org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-13T00:32:15,369 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T00:32:15,369 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T00:32:15,369 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-13T00:32:15,369 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-13T00:32:15,369 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=2068148996, stopped=false 2024-11-13T00:32:15,369 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=cb28580903fb,39625,1731457897665 2024-11-13T00:32:15,371 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35989-0x1010ebcbf430001, quorum=127.0.0.1:63834, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-13T00:32:15,371 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39625-0x1010ebcbf430000, quorum=127.0.0.1:63834, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-13T00:32:15,371 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35989-0x1010ebcbf430001, quorum=127.0.0.1:63834, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T00:32:15,371 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39625-0x1010ebcbf430000, quorum=127.0.0.1:63834, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T00:32:15,371 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-13T00:32:15,371 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-13T00:32:15,372 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-13T00:32:15,372 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T00:32:15,372 DEBUG [zk-event-processor-pool-0 {}] 
zookeeper.ZKUtil(113): master:39625-0x1010ebcbf430000, quorum=127.0.0.1:63834, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-13T00:32:15,372 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'cb28580903fb,35989,1731457897712' ***** 2024-11-13T00:32:15,372 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-13T00:32:15,372 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:35989-0x1010ebcbf430001, quorum=127.0.0.1:63834, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-13T00:32:15,372 INFO [RS:0;cb28580903fb:35989 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-13T00:32:15,372 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-13T00:32:15,373 INFO [RS:0;cb28580903fb:35989 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-13T00:32:15,373 INFO [RS:0;cb28580903fb:35989 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-13T00:32:15,373 INFO [RS:0;cb28580903fb:35989 {}] regionserver.HRegionServer(3091): Received CLOSE for accf212f08b962d4f9ef633e1c33d603 2024-11-13T00:32:15,373 INFO [RS:0;cb28580903fb:35989 {}] regionserver.HRegionServer(3091): Received CLOSE for 7b90f83fab0270206e7a13b40fbba0e2 2024-11-13T00:32:15,373 INFO [RS:0;cb28580903fb:35989 {}] regionserver.HRegionServer(959): stopping server cb28580903fb,35989,1731457897712 2024-11-13T00:32:15,373 INFO [RS:0;cb28580903fb:35989 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-13T00:32:15,373 INFO [RS:0;cb28580903fb:35989 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;cb28580903fb:35989. 
2024-11-13T00:32:15,373 DEBUG [RS:0;cb28580903fb:35989 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-13T00:32:15,373 DEBUG [RS_CLOSE_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing accf212f08b962d4f9ef633e1c33d603, disabling compactions & flushes 2024-11-13T00:32:15,373 DEBUG [RS:0;cb28580903fb:35989 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T00:32:15,373 INFO [RS_CLOSE_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,row0062,1731457910825.accf212f08b962d4f9ef633e1c33d603. 2024-11-13T00:32:15,373 DEBUG [RS_CLOSE_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,row0062,1731457910825.accf212f08b962d4f9ef633e1c33d603. 2024-11-13T00:32:15,373 DEBUG [RS_CLOSE_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,row0062,1731457910825.accf212f08b962d4f9ef633e1c33d603. after waiting 0 ms 2024-11-13T00:32:15,373 INFO [RS:0;cb28580903fb:35989 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-13T00:32:15,373 DEBUG [RS_CLOSE_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,row0062,1731457910825.accf212f08b962d4f9ef633e1c33d603. 2024-11-13T00:32:15,373 INFO [RS:0;cb28580903fb:35989 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-13T00:32:15,373 INFO [RS:0;cb28580903fb:35989 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-11-13T00:32:15,373 INFO [RS:0;cb28580903fb:35989 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-13T00:32:15,373 INFO [RS:0;cb28580903fb:35989 {}] regionserver.HRegionServer(1321): Waiting on 3 regions to close 2024-11-13T00:32:15,373 DEBUG [RS:0;cb28580903fb:35989 {}] regionserver.HRegionServer(1325): Online Regions={accf212f08b962d4f9ef633e1c33d603=TestLogRolling-testLogRolling,row0062,1731457910825.accf212f08b962d4f9ef633e1c33d603., 7b90f83fab0270206e7a13b40fbba0e2=TestLogRolling-testLogRolling,,1731457910825.7b90f83fab0270206e7a13b40fbba0e2., 1588230740=hbase:meta,,1.1588230740} 2024-11-13T00:32:15,373 DEBUG [RS:0;cb28580903fb:35989 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 7b90f83fab0270206e7a13b40fbba0e2, accf212f08b962d4f9ef633e1c33d603 2024-11-13T00:32:15,373 DEBUG [RS_CLOSE_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-13T00:32:15,373 INFO [RS_CLOSE_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-13T00:32:15,373 DEBUG [RS_CLOSE_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-13T00:32:15,373 DEBUG [RS_CLOSE_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-13T00:32:15,373 DEBUG [RS_CLOSE_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-13T00:32:15,373 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731457910825.accf212f08b962d4f9ef633e1c33d603.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/accf212f08b962d4f9ef633e1c33d603/info/d2f57a9da59c428a9f552b25a2e381fa.d7ca01440872ba9aaac8181c0fcee4c0->hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/d7ca01440872ba9aaac8181c0fcee4c0/info/d2f57a9da59c428a9f552b25a2e381fa-top, hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/accf212f08b962d4f9ef633e1c33d603/info/38d9257cdb684588a9036afc22f0d10c, hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/accf212f08b962d4f9ef633e1c33d603/info/TestLogRolling-testLogRolling=d7ca01440872ba9aaac8181c0fcee4c0-9e7a8726f4384cc88eaa9ecd115fcc8e, hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/accf212f08b962d4f9ef633e1c33d603/info/a722e576a4874194babf899d4cc640cb, hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/accf212f08b962d4f9ef633e1c33d603/info/cdb73f989d2d42259da06ac148c9a567, hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/accf212f08b962d4f9ef633e1c33d603/info/3421acaefd2b49749f5d4a19ecf4bc66, 
hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/accf212f08b962d4f9ef633e1c33d603/info/faa134f6aec54d2ca00ff64016747b36, hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/accf212f08b962d4f9ef633e1c33d603/info/1b6d4f2231ae44b8a3c0d4e5608606a0, hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/accf212f08b962d4f9ef633e1c33d603/info/d54d34ace7e546eb8f4b4704b779a7da, hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/accf212f08b962d4f9ef633e1c33d603/info/7e82f10bf4a54ae2b092667817bafc48, hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/accf212f08b962d4f9ef633e1c33d603/info/d4ed370813c44a388e863ac33bd51ee4, hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/accf212f08b962d4f9ef633e1c33d603/info/147c225260d74683bf31e2d9254b97e7, hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/accf212f08b962d4f9ef633e1c33d603/info/1a09ecb2c7dd4cddb5a200ad9386f332, hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/accf212f08b962d4f9ef633e1c33d603/info/e49fd42eff1e407baeb685ba172ee35b, hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/accf212f08b962d4f9ef633e1c33d603/info/a1644a90af6c4991b1461d9216341c76, hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/accf212f08b962d4f9ef633e1c33d603/info/035fd0d937c9461bb8444bbe9d641a71, hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/accf212f08b962d4f9ef633e1c33d603/info/bc831392a79f48adac1b9a59580cc988, hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/accf212f08b962d4f9ef633e1c33d603/info/663a9da9f767433bbc7b7a89db29bc02, hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/accf212f08b962d4f9ef633e1c33d603/info/f0e041a4d7904c3f827a5a1218ba3768, hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/accf212f08b962d4f9ef633e1c33d603/info/9ced62f8baa740d996a3a39d27ddad1d, hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/accf212f08b962d4f9ef633e1c33d603/info/2262ec92aca4479db776fb55f85ccf32, hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/accf212f08b962d4f9ef633e1c33d603/info/ba80950b83a2484b8dda8a665e809b33, hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/accf212f08b962d4f9ef633e1c33d603/info/edb4dafa16c94c0f8fdb77f90382da45] to archive 2024-11-13T00:32:15,375 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731457910825.accf212f08b962d4f9ef633e1c33d603.-1 {}] backup.HFileArchiver(360): Archiving 
compacted files. 2024-11-13T00:32:15,377 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731457910825.accf212f08b962d4f9ef633e1c33d603.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/accf212f08b962d4f9ef633e1c33d603/info/d2f57a9da59c428a9f552b25a2e381fa.d7ca01440872ba9aaac8181c0fcee4c0 to hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/archive/data/default/TestLogRolling-testLogRolling/accf212f08b962d4f9ef633e1c33d603/info/d2f57a9da59c428a9f552b25a2e381fa.d7ca01440872ba9aaac8181c0fcee4c0 2024-11-13T00:32:15,378 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731457910825.accf212f08b962d4f9ef633e1c33d603.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/accf212f08b962d4f9ef633e1c33d603/info/38d9257cdb684588a9036afc22f0d10c to hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/archive/data/default/TestLogRolling-testLogRolling/accf212f08b962d4f9ef633e1c33d603/info/38d9257cdb684588a9036afc22f0d10c 2024-11-13T00:32:15,378 DEBUG [RS_CLOSE_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/hbase/meta/1588230740/recovered.edits/24.seqid, newMaxSeqId=24, maxSeqId=1 2024-11-13T00:32:15,379 DEBUG [RS_CLOSE_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-13T00:32:15,379 INFO [RS_CLOSE_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-13T00:32:15,379 DEBUG [RS_CLOSE_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731457935373Running coprocessor pre-close hooks at 1731457935373Disabling compacts and flushes for region at 1731457935373Disabling writes for close at 1731457935373Writing region close event to WAL at 1731457935375 (+2 ms)Running coprocessor post-close hooks at 1731457935379 (+4 ms)Closed at 1731457935379 2024-11-13T00:32:15,379 DEBUG [RS_CLOSE_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-13T00:32:15,379 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731457910825.accf212f08b962d4f9ef633e1c33d603.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/accf212f08b962d4f9ef633e1c33d603/info/TestLogRolling-testLogRolling=d7ca01440872ba9aaac8181c0fcee4c0-9e7a8726f4384cc88eaa9ecd115fcc8e to hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/archive/data/default/TestLogRolling-testLogRolling/accf212f08b962d4f9ef633e1c33d603/info/TestLogRolling-testLogRolling=d7ca01440872ba9aaac8181c0fcee4c0-9e7a8726f4384cc88eaa9ecd115fcc8e 2024-11-13T00:32:15,380 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731457910825.accf212f08b962d4f9ef633e1c33d603.-1 {}] backup.HFileArchiver(596): Archived from 
FileableStoreFile, hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/accf212f08b962d4f9ef633e1c33d603/info/a722e576a4874194babf899d4cc640cb to hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/archive/data/default/TestLogRolling-testLogRolling/accf212f08b962d4f9ef633e1c33d603/info/a722e576a4874194babf899d4cc640cb 2024-11-13T00:32:15,381 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731457910825.accf212f08b962d4f9ef633e1c33d603.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/accf212f08b962d4f9ef633e1c33d603/info/cdb73f989d2d42259da06ac148c9a567 to hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/archive/data/default/TestLogRolling-testLogRolling/accf212f08b962d4f9ef633e1c33d603/info/cdb73f989d2d42259da06ac148c9a567 2024-11-13T00:32:15,382 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731457910825.accf212f08b962d4f9ef633e1c33d603.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/accf212f08b962d4f9ef633e1c33d603/info/3421acaefd2b49749f5d4a19ecf4bc66 to hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/archive/data/default/TestLogRolling-testLogRolling/accf212f08b962d4f9ef633e1c33d603/info/3421acaefd2b49749f5d4a19ecf4bc66 2024-11-13T00:32:15,383 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731457910825.accf212f08b962d4f9ef633e1c33d603.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/accf212f08b962d4f9ef633e1c33d603/info/faa134f6aec54d2ca00ff64016747b36 to hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/archive/data/default/TestLogRolling-testLogRolling/accf212f08b962d4f9ef633e1c33d603/info/faa134f6aec54d2ca00ff64016747b36 2024-11-13T00:32:15,384 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731457910825.accf212f08b962d4f9ef633e1c33d603.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/accf212f08b962d4f9ef633e1c33d603/info/1b6d4f2231ae44b8a3c0d4e5608606a0 to hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/archive/data/default/TestLogRolling-testLogRolling/accf212f08b962d4f9ef633e1c33d603/info/1b6d4f2231ae44b8a3c0d4e5608606a0 2024-11-13T00:32:15,385 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731457910825.accf212f08b962d4f9ef633e1c33d603.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/accf212f08b962d4f9ef633e1c33d603/info/d54d34ace7e546eb8f4b4704b779a7da to hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/archive/data/default/TestLogRolling-testLogRolling/accf212f08b962d4f9ef633e1c33d603/info/d54d34ace7e546eb8f4b4704b779a7da 2024-11-13T00:32:15,386 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731457910825.accf212f08b962d4f9ef633e1c33d603.-1 
{}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/accf212f08b962d4f9ef633e1c33d603/info/7e82f10bf4a54ae2b092667817bafc48 to hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/archive/data/default/TestLogRolling-testLogRolling/accf212f08b962d4f9ef633e1c33d603/info/7e82f10bf4a54ae2b092667817bafc48 2024-11-13T00:32:15,387 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731457910825.accf212f08b962d4f9ef633e1c33d603.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/accf212f08b962d4f9ef633e1c33d603/info/d4ed370813c44a388e863ac33bd51ee4 to hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/archive/data/default/TestLogRolling-testLogRolling/accf212f08b962d4f9ef633e1c33d603/info/d4ed370813c44a388e863ac33bd51ee4 2024-11-13T00:32:15,388 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731457910825.accf212f08b962d4f9ef633e1c33d603.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/accf212f08b962d4f9ef633e1c33d603/info/147c225260d74683bf31e2d9254b97e7 to hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/archive/data/default/TestLogRolling-testLogRolling/accf212f08b962d4f9ef633e1c33d603/info/147c225260d74683bf31e2d9254b97e7 2024-11-13T00:32:15,389 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731457910825.accf212f08b962d4f9ef633e1c33d603.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/accf212f08b962d4f9ef633e1c33d603/info/1a09ecb2c7dd4cddb5a200ad9386f332 to hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/archive/data/default/TestLogRolling-testLogRolling/accf212f08b962d4f9ef633e1c33d603/info/1a09ecb2c7dd4cddb5a200ad9386f332 2024-11-13T00:32:15,390 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731457910825.accf212f08b962d4f9ef633e1c33d603.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/accf212f08b962d4f9ef633e1c33d603/info/e49fd42eff1e407baeb685ba172ee35b to hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/archive/data/default/TestLogRolling-testLogRolling/accf212f08b962d4f9ef633e1c33d603/info/e49fd42eff1e407baeb685ba172ee35b 2024-11-13T00:32:15,391 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731457910825.accf212f08b962d4f9ef633e1c33d603.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/accf212f08b962d4f9ef633e1c33d603/info/a1644a90af6c4991b1461d9216341c76 to hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/archive/data/default/TestLogRolling-testLogRolling/accf212f08b962d4f9ef633e1c33d603/info/a1644a90af6c4991b1461d9216341c76 2024-11-13T00:32:15,392 DEBUG 
[StoreCloser-TestLogRolling-testLogRolling,row0062,1731457910825.accf212f08b962d4f9ef633e1c33d603.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/accf212f08b962d4f9ef633e1c33d603/info/035fd0d937c9461bb8444bbe9d641a71 to hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/archive/data/default/TestLogRolling-testLogRolling/accf212f08b962d4f9ef633e1c33d603/info/035fd0d937c9461bb8444bbe9d641a71 2024-11-13T00:32:15,393 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731457910825.accf212f08b962d4f9ef633e1c33d603.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/accf212f08b962d4f9ef633e1c33d603/info/bc831392a79f48adac1b9a59580cc988 to hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/archive/data/default/TestLogRolling-testLogRolling/accf212f08b962d4f9ef633e1c33d603/info/bc831392a79f48adac1b9a59580cc988 2024-11-13T00:32:15,393 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731457910825.accf212f08b962d4f9ef633e1c33d603.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/accf212f08b962d4f9ef633e1c33d603/info/663a9da9f767433bbc7b7a89db29bc02 to hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/archive/data/default/TestLogRolling-testLogRolling/accf212f08b962d4f9ef633e1c33d603/info/663a9da9f767433bbc7b7a89db29bc02 2024-11-13T00:32:15,394 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731457910825.accf212f08b962d4f9ef633e1c33d603.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/accf212f08b962d4f9ef633e1c33d603/info/f0e041a4d7904c3f827a5a1218ba3768 to hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/archive/data/default/TestLogRolling-testLogRolling/accf212f08b962d4f9ef633e1c33d603/info/f0e041a4d7904c3f827a5a1218ba3768 2024-11-13T00:32:15,395 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731457910825.accf212f08b962d4f9ef633e1c33d603.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/accf212f08b962d4f9ef633e1c33d603/info/9ced62f8baa740d996a3a39d27ddad1d to hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/archive/data/default/TestLogRolling-testLogRolling/accf212f08b962d4f9ef633e1c33d603/info/9ced62f8baa740d996a3a39d27ddad1d 2024-11-13T00:32:15,396 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731457910825.accf212f08b962d4f9ef633e1c33d603.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/accf212f08b962d4f9ef633e1c33d603/info/2262ec92aca4479db776fb55f85ccf32 to 
hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/archive/data/default/TestLogRolling-testLogRolling/accf212f08b962d4f9ef633e1c33d603/info/2262ec92aca4479db776fb55f85ccf32 2024-11-13T00:32:15,397 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731457910825.accf212f08b962d4f9ef633e1c33d603.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/accf212f08b962d4f9ef633e1c33d603/info/ba80950b83a2484b8dda8a665e809b33 to hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/archive/data/default/TestLogRolling-testLogRolling/accf212f08b962d4f9ef633e1c33d603/info/ba80950b83a2484b8dda8a665e809b33 2024-11-13T00:32:15,398 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731457910825.accf212f08b962d4f9ef633e1c33d603.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/accf212f08b962d4f9ef633e1c33d603/info/edb4dafa16c94c0f8fdb77f90382da45 to hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/archive/data/default/TestLogRolling-testLogRolling/accf212f08b962d4f9ef633e1c33d603/info/edb4dafa16c94c0f8fdb77f90382da45 2024-11-13T00:32:15,398 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731457910825.accf212f08b962d4f9ef633e1c33d603.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=cb28580903fb:39625 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] 
at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 
16 more 2024-11-13T00:32:15,399 WARN [StoreCloser-TestLogRolling-testLogRolling,row0062,1731457910825.accf212f08b962d4f9ef633e1c33d603.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [38d9257cdb684588a9036afc22f0d10c=8359, a722e576a4874194babf899d4cc640cb=12509, cdb73f989d2d42259da06ac148c9a567=31106, 3421acaefd2b49749f5d4a19ecf4bc66=20064, faa134f6aec54d2ca00ff64016747b36=17896, 1b6d4f2231ae44b8a3c0d4e5608606a0=59266, d54d34ace7e546eb8f4b4704b779a7da=20078, 7e82f10bf4a54ae2b092667817bafc48=21156, d4ed370813c44a388e863ac33bd51ee4=83221, 147c225260d74683bf31e2d9254b97e7=12516, 1a09ecb2c7dd4cddb5a200ad9386f332=22238, e49fd42eff1e407baeb685ba172ee35b=113515, a1644a90af6c4991b1461d9216341c76=17906, 035fd0d937c9461bb8444bbe9d641a71=21156, bc831392a79f48adac1b9a59580cc988=145084, 663a9da9f767433bbc7b7a89db29bc02=20078, f0e041a4d7904c3f827a5a1218ba3768=12520, 9ced62f8baa740d996a3a39d27ddad1d=171087, 2262ec92aca4479db776fb55f85ccf32=23333, ba80950b83a2484b8dda8a665e809b33=16839, edb4dafa16c94c0f8fdb77f90382da45=20092] 2024-11-13T00:32:15,402 DEBUG [RS_CLOSE_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/accf212f08b962d4f9ef633e1c33d603/recovered.edits/339.seqid, newMaxSeqId=339, maxSeqId=88 2024-11-13T00:32:15,402 INFO [RS_CLOSE_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,row0062,1731457910825.accf212f08b962d4f9ef633e1c33d603. 2024-11-13T00:32:15,402 DEBUG [RS_CLOSE_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for accf212f08b962d4f9ef633e1c33d603: Waiting for close lock at 1731457935373Running coprocessor pre-close hooks at 1731457935373Disabling compacts and flushes for region at 1731457935373Disabling writes for close at 1731457935373Writing region close event to WAL at 1731457935399 (+26 ms)Running coprocessor post-close hooks at 1731457935402 (+3 ms)Closed at 1731457935402 2024-11-13T00:32:15,402 DEBUG [RS_CLOSE_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRolling,row0062,1731457910825.accf212f08b962d4f9ef633e1c33d603. 2024-11-13T00:32:15,403 DEBUG [RS_CLOSE_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 7b90f83fab0270206e7a13b40fbba0e2, disabling compactions & flushes 2024-11-13T00:32:15,403 INFO [RS_CLOSE_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1731457910825.7b90f83fab0270206e7a13b40fbba0e2. 2024-11-13T00:32:15,403 DEBUG [RS_CLOSE_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1731457910825.7b90f83fab0270206e7a13b40fbba0e2. 2024-11-13T00:32:15,403 DEBUG [RS_CLOSE_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1731457910825.7b90f83fab0270206e7a13b40fbba0e2. 
after waiting 0 ms 2024-11-13T00:32:15,403 DEBUG [RS_CLOSE_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1731457910825.7b90f83fab0270206e7a13b40fbba0e2. 2024-11-13T00:32:15,403 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731457910825.7b90f83fab0270206e7a13b40fbba0e2.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/7b90f83fab0270206e7a13b40fbba0e2/info/d2f57a9da59c428a9f552b25a2e381fa.d7ca01440872ba9aaac8181c0fcee4c0->hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/d7ca01440872ba9aaac8181c0fcee4c0/info/d2f57a9da59c428a9f552b25a2e381fa-bottom] to archive 2024-11-13T00:32:15,404 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731457910825.7b90f83fab0270206e7a13b40fbba0e2.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-13T00:32:15,405 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731457910825.7b90f83fab0270206e7a13b40fbba0e2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/7b90f83fab0270206e7a13b40fbba0e2/info/d2f57a9da59c428a9f552b25a2e381fa.d7ca01440872ba9aaac8181c0fcee4c0 to hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/archive/data/default/TestLogRolling-testLogRolling/7b90f83fab0270206e7a13b40fbba0e2/info/d2f57a9da59c428a9f552b25a2e381fa.d7ca01440872ba9aaac8181c0fcee4c0 2024-11-13T00:32:15,405 WARN [StoreCloser-TestLogRolling-testLogRolling,,1731457910825.7b90f83fab0270206e7a13b40fbba0e2.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [] 2024-11-13T00:32:15,408 DEBUG [RS_CLOSE_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/data/default/TestLogRolling-testLogRolling/7b90f83fab0270206e7a13b40fbba0e2/recovered.edits/93.seqid, newMaxSeqId=93, maxSeqId=88 2024-11-13T00:32:15,408 INFO [RS_CLOSE_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1731457910825.7b90f83fab0270206e7a13b40fbba0e2. 2024-11-13T00:32:15,408 DEBUG [RS_CLOSE_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 7b90f83fab0270206e7a13b40fbba0e2: Waiting for close lock at 1731457935403Running coprocessor pre-close hooks at 1731457935403Disabling compacts and flushes for region at 1731457935403Disabling writes for close at 1731457935403Writing region close event to WAL at 1731457935405 (+2 ms)Running coprocessor post-close hooks at 1731457935408 (+3 ms)Closed at 1731457935408 2024-11-13T00:32:15,408 DEBUG [RS_CLOSE_REGION-regionserver/cb28580903fb:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRolling,,1731457910825.7b90f83fab0270206e7a13b40fbba0e2. 2024-11-13T00:32:15,574 INFO [RS:0;cb28580903fb:35989 {}] regionserver.HRegionServer(976): stopping server cb28580903fb,35989,1731457897712; all regions closed. 
2024-11-13T00:32:15,574 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:32:15,574 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:32:15,574 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:32:15,574 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:32:15,574 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:32:15,576 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46003 is added to blk_1073741834_1010 (size=8107) 2024-11-13T00:32:15,576 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43865 is added to blk_1073741834_1010 (size=8107) 2024-11-13T00:32:15,579 DEBUG [RS:0;cb28580903fb:35989 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/oldWALs 2024-11-13T00:32:15,579 INFO [RS:0;cb28580903fb:35989 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog cb28580903fb%2C35989%2C1731457897712.meta:.meta(num 1731457898450) 2024-11-13T00:32:15,579 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:32:15,579 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:32:15,579 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:32:15,579 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:32:15,579 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:32:15,581 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46003 is added to blk_1073741879_1055 (size=778) 2024-11-13T00:32:15,581 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43865 is added to blk_1073741879_1055 (size=778) 2024-11-13T00:32:15,583 DEBUG [RS:0;cb28580903fb:35989 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/oldWALs 2024-11-13T00:32:15,583 INFO [RS:0;cb28580903fb:35989 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog cb28580903fb%2C35989%2C1731457897712:(num 1731457935262) 2024-11-13T00:32:15,583 DEBUG [RS:0;cb28580903fb:35989 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T00:32:15,583 INFO [RS:0;cb28580903fb:35989 {}] regionserver.LeaseManager(133): Closed leases 2024-11-13T00:32:15,583 INFO [RS:0;cb28580903fb:35989 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-13T00:32:15,583 INFO [RS:0;cb28580903fb:35989 {}] hbase.ChoreService(370): Chore service for: regionserver/cb28580903fb:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-13T00:32:15,583 INFO [RS:0;cb28580903fb:35989 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-13T00:32:15,583 INFO [regionserver/cb28580903fb:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-13T00:32:15,583 INFO [RS:0;cb28580903fb:35989 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:35989 2024-11-13T00:32:15,585 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35989-0x1010ebcbf430001, quorum=127.0.0.1:63834, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/cb28580903fb,35989,1731457897712 2024-11-13T00:32:15,585 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39625-0x1010ebcbf430000, quorum=127.0.0.1:63834, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-13T00:32:15,585 INFO [RS:0;cb28580903fb:35989 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-13T00:32:15,586 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [cb28580903fb,35989,1731457897712] 2024-11-13T00:32:15,588 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/cb28580903fb,35989,1731457897712 already deleted, retry=false 2024-11-13T00:32:15,589 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; cb28580903fb,35989,1731457897712 expired; onlineServers=0 2024-11-13T00:32:15,589 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'cb28580903fb,39625,1731457897665' ***** 2024-11-13T00:32:15,589 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-13T00:32:15,589 INFO [M:0;cb28580903fb:39625 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-13T00:32:15,589 INFO [M:0;cb28580903fb:39625 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-13T00:32:15,589 DEBUG [M:0;cb28580903fb:39625 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-13T00:32:15,589 DEBUG [M:0;cb28580903fb:39625 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-13T00:32:15,589 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-13T00:32:15,589 DEBUG [master/cb28580903fb:0:becomeActiveMaster-HFileCleaner.large.0-1731457897852 {}] cleaner.HFileCleaner(306): Exit Thread[master/cb28580903fb:0:becomeActiveMaster-HFileCleaner.large.0-1731457897852,5,FailOnTimeoutGroup] 2024-11-13T00:32:15,589 DEBUG [master/cb28580903fb:0:becomeActiveMaster-HFileCleaner.small.0-1731457897852 {}] cleaner.HFileCleaner(306): Exit Thread[master/cb28580903fb:0:becomeActiveMaster-HFileCleaner.small.0-1731457897852,5,FailOnTimeoutGroup] 2024-11-13T00:32:15,589 INFO [M:0;cb28580903fb:39625 {}] hbase.ChoreService(370): Chore service for: master/cb28580903fb:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-13T00:32:15,589 INFO [M:0;cb28580903fb:39625 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-13T00:32:15,589 DEBUG [M:0;cb28580903fb:39625 {}] master.HMaster(1795): Stopping service threads 2024-11-13T00:32:15,589 INFO [M:0;cb28580903fb:39625 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-13T00:32:15,590 INFO [M:0;cb28580903fb:39625 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-13T00:32:15,590 INFO [M:0;cb28580903fb:39625 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-13T00:32:15,590 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-13T00:32:15,590 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39625-0x1010ebcbf430000, quorum=127.0.0.1:63834, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-13T00:32:15,591 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39625-0x1010ebcbf430000, quorum=127.0.0.1:63834, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T00:32:15,591 DEBUG [M:0;cb28580903fb:39625 {}] zookeeper.ZKUtil(347): master:39625-0x1010ebcbf430000, quorum=127.0.0.1:63834, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-13T00:32:15,591 WARN [M:0;cb28580903fb:39625 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-13T00:32:15,591 INFO [M:0;cb28580903fb:39625 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/.lastflushedseqids 2024-11-13T00:32:15,595 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46003 is added to blk_1073741880_1056 (size=228) 2024-11-13T00:32:15,596 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43865 is added to blk_1073741880_1056 (size=228) 2024-11-13T00:32:15,596 INFO [M:0;cb28580903fb:39625 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-13T00:32:15,596 INFO [M:0;cb28580903fb:39625 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-13T00:32:15,596 DEBUG [M:0;cb28580903fb:39625 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-13T00:32:15,597 INFO [M:0;cb28580903fb:39625 {}] 
regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-13T00:32:15,597 DEBUG [M:0;cb28580903fb:39625 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-13T00:32:15,597 DEBUG [M:0;cb28580903fb:39625 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-13T00:32:15,597 DEBUG [M:0;cb28580903fb:39625 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-13T00:32:15,597 INFO [M:0;cb28580903fb:39625 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=51.42 KB heapSize=63.35 KB 2024-11-13T00:32:15,613 DEBUG [M:0;cb28580903fb:39625 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/b40f71387f5c461b8c7d81e86bc677e7 is 82, key is hbase:meta,,1/info:regioninfo/1731457898473/Put/seqid=0 2024-11-13T00:32:15,617 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46003 is added to blk_1073741881_1057 (size=5672) 2024-11-13T00:32:15,617 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43865 is added to blk_1073741881_1057 (size=5672) 2024-11-13T00:32:15,617 INFO [M:0;cb28580903fb:39625 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/b40f71387f5c461b8c7d81e86bc677e7 2024-11-13T00:32:15,643 DEBUG [M:0;cb28580903fb:39625 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/f0dc64cc614d44e6962006bb60b0b634 is 750, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731457898901/Put/seqid=0 2024-11-13T00:32:15,646 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43865 is added to blk_1073741882_1058 (size=7090) 2024-11-13T00:32:15,647 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46003 is added to blk_1073741882_1058 (size=7090) 2024-11-13T00:32:15,647 INFO [M:0;cb28580903fb:39625 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=50.81 KB at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/f0dc64cc614d44e6962006bb60b0b634 2024-11-13T00:32:15,651 INFO [M:0;cb28580903fb:39625 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for f0dc64cc614d44e6962006bb60b0b634 2024-11-13T00:32:15,665 DEBUG [M:0;cb28580903fb:39625 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/a42a2d372e0e49a6bfb44edcdf639d28 is 69, key is cb28580903fb,35989,1731457897712/rs:state/1731457897946/Put/seqid=0 
2024-11-13T00:32:15,669 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43865 is added to blk_1073741883_1059 (size=5156) 2024-11-13T00:32:15,669 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46003 is added to blk_1073741883_1059 (size=5156) 2024-11-13T00:32:15,669 INFO [M:0;cb28580903fb:39625 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/a42a2d372e0e49a6bfb44edcdf639d28 2024-11-13T00:32:15,686 DEBUG [M:0;cb28580903fb:39625 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/4e47683a86d0400caf70b67f131ab0c6 is 52, key is load_balancer_on/state:d/1731457898535/Put/seqid=0 2024-11-13T00:32:15,687 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35989-0x1010ebcbf430001, quorum=127.0.0.1:63834, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-13T00:32:15,687 INFO [RS:0;cb28580903fb:35989 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-13T00:32:15,687 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35989-0x1010ebcbf430001, quorum=127.0.0.1:63834, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-13T00:32:15,687 INFO [RS:0;cb28580903fb:35989 {}] regionserver.HRegionServer(1031): Exiting; stopping=cb28580903fb,35989,1731457897712; zookeeper connection closed. 
2024-11-13T00:32:15,687 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@7f7e0575 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@7f7e0575 2024-11-13T00:32:15,687 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-13T00:32:15,690 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46003 is added to blk_1073741884_1060 (size=5056) 2024-11-13T00:32:15,690 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43865 is added to blk_1073741884_1060 (size=5056) 2024-11-13T00:32:15,691 INFO [M:0;cb28580903fb:39625 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/4e47683a86d0400caf70b67f131ab0c6 2024-11-13T00:32:15,694 DEBUG [M:0;cb28580903fb:39625 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/b40f71387f5c461b8c7d81e86bc677e7 as hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/b40f71387f5c461b8c7d81e86bc677e7 2024-11-13T00:32:15,698 INFO [M:0;cb28580903fb:39625 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/b40f71387f5c461b8c7d81e86bc677e7, entries=8, sequenceid=125, filesize=5.5 K 2024-11-13T00:32:15,699 DEBUG [M:0;cb28580903fb:39625 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/f0dc64cc614d44e6962006bb60b0b634 as hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/f0dc64cc614d44e6962006bb60b0b634 2024-11-13T00:32:15,702 INFO [M:0;cb28580903fb:39625 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for f0dc64cc614d44e6962006bb60b0b634 2024-11-13T00:32:15,702 INFO [M:0;cb28580903fb:39625 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/f0dc64cc614d44e6962006bb60b0b634, entries=13, sequenceid=125, filesize=6.9 K 2024-11-13T00:32:15,703 DEBUG [M:0;cb28580903fb:39625 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/a42a2d372e0e49a6bfb44edcdf639d28 as hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/a42a2d372e0e49a6bfb44edcdf639d28 2024-11-13T00:32:15,706 INFO [M:0;cb28580903fb:39625 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/a42a2d372e0e49a6bfb44edcdf639d28, entries=1, sequenceid=125, filesize=5.0 K 2024-11-13T00:32:15,707 DEBUG [M:0;cb28580903fb:39625 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/4e47683a86d0400caf70b67f131ab0c6 as hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/4e47683a86d0400caf70b67f131ab0c6 2024-11-13T00:32:15,710 INFO [M:0;cb28580903fb:39625 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44083/user/jenkins/test-data/76ec3a3a-8b1b-41a6-dd77-e5cd0ba78132/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/4e47683a86d0400caf70b67f131ab0c6, entries=1, sequenceid=125, filesize=4.9 K 2024-11-13T00:32:15,711 INFO [M:0;cb28580903fb:39625 {}] regionserver.HRegion(3140): Finished flush of dataSize ~51.42 KB/52651, heapSize ~63.29 KB/64808, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 114ms, sequenceid=125, compaction requested=false 2024-11-13T00:32:15,712 INFO [M:0;cb28580903fb:39625 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-13T00:32:15,712 DEBUG [M:0;cb28580903fb:39625 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731457935596Disabling compacts and flushes for region at 1731457935596Disabling writes for close at 1731457935597 (+1 ms)Obtaining lock to block concurrent updates at 1731457935597Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731457935597Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=52651, getHeapSize=64808, getOffHeapSize=0, getCellsCount=148 at 1731457935597Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1731457935598 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731457935598Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731457935612 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731457935612Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731457935622 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731457935642 (+20 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731457935642Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731457935651 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731457935664 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731457935664Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731457935673 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731457935686 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731457935686Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6b62710a: reopening flushed file at 1731457935694 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4675e716: reopening flushed file at 1731457935698 (+4 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@9d59a99: reopening flushed file at 1731457935702 (+4 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@739bf4cb: reopening flushed file at 1731457935706 (+4 ms)Finished flush of dataSize ~51.42 KB/52651, heapSize ~63.29 KB/64808, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 114ms, sequenceid=125, compaction requested=false at 1731457935711 (+5 ms)Writing region close event to WAL at 1731457935712 (+1 ms)Closed at 1731457935712 2024-11-13T00:32:15,713 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:32:15,713 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:32:15,713 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:32:15,713 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:32:15,713 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:32:15,714 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43865 is added to blk_1073741830_1006 (size=61320) 2024-11-13T00:32:15,715 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46003 is added to blk_1073741830_1006 (size=61320) 2024-11-13T00:32:15,715 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-13T00:32:15,715 INFO [M:0;cb28580903fb:39625 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-13T00:32:15,716 INFO [M:0;cb28580903fb:39625 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:39625 2024-11-13T00:32:15,716 INFO [M:0;cb28580903fb:39625 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-13T00:32:15,737 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/MasterData/WALs/cb28580903fb,32917,1731457759225/cb28580903fb%2C32917%2C1731457759225.1731457759520 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-13T00:32:15,817 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39625-0x1010ebcbf430000, quorum=127.0.0.1:63834, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-13T00:32:15,817 INFO [M:0;cb28580903fb:39625 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-13T00:32:15,817 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39625-0x1010ebcbf430000, quorum=127.0.0.1:63834, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-13T00:32:15,820 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@6d183f58{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-13T00:32:15,821 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@72db48b3{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-13T00:32:15,821 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-13T00:32:15,821 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7c84a90f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-13T00:32:15,821 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@602a7b70{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ff82c900-794d-15f5-f48c-9e4d638c09e2/hadoop.log.dir/,STOPPED} 2024-11-13T00:32:15,822 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
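The Close-WAL-Writer-0 warnings above ("Failed invocation ... Caused by: java.io.IOException: Filesystem closed") come from RecoverLeaseFSUtils reflectively calling DistributedFileSystem.isFileClosed after the test's DFSClient has already been shut down during teardown, so the probe cannot confirm the WAL file is closed. A minimal sketch of that kind of probe, assuming only the public DistributedFileSystem.isFileClosed(Path) API that appears in the stack trace; the class and helper names below are illustrative, not HBase's actual implementation:

```java
import java.io.IOException;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public final class IsFileClosedProbe {
  // Hypothetical helper: returns true only when HDFS confirms the file is closed.
  // A dead client ("Filesystem closed") is treated as "not confirmed", mirroring
  // the WARN-and-continue behaviour seen in the log above.
  static boolean confirmedClosed(FileSystem fs, Path path) {
    if (!(fs instanceof DistributedFileSystem)) {
      return true; // no HDFS lease to recover on other filesystems
    }
    try {
      return ((DistributedFileSystem) fs).isFileClosed(path);
    } catch (IOException e) {
      return false; // e.g. "Filesystem closed" once the minicluster is torn down
    }
  }
}
```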
2024-11-13T00:32:15,822 WARN [BP-487134543-172.17.0.2-1731457896957 heartbeating to localhost/127.0.0.1:44083 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-13T00:32:15,822 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-13T00:32:15,822 WARN [BP-487134543-172.17.0.2-1731457896957 heartbeating to localhost/127.0.0.1:44083 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-487134543-172.17.0.2-1731457896957 (Datanode Uuid 58564819-483e-4563-98b6-090c7bbe77ce) service to localhost/127.0.0.1:44083 2024-11-13T00:32:15,823 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ff82c900-794d-15f5-f48c-9e4d638c09e2/cluster_d08ed4d9-c3e3-0405-2e05-46334e65a045/data/data3/current/BP-487134543-172.17.0.2-1731457896957 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-13T00:32:15,823 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ff82c900-794d-15f5-f48c-9e4d638c09e2/cluster_d08ed4d9-c3e3-0405-2e05-46334e65a045/data/data4/current/BP-487134543-172.17.0.2-1731457896957 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-13T00:32:15,823 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-13T00:32:15,825 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4ebf55d8{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-13T00:32:15,825 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@10460741{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-13T00:32:15,825 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-13T00:32:15,825 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@42326ea1{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-13T00:32:15,826 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@529d096e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ff82c900-794d-15f5-f48c-9e4d638c09e2/hadoop.log.dir/,STOPPED} 2024-11-13T00:32:15,827 WARN [BP-487134543-172.17.0.2-1731457896957 heartbeating to localhost/127.0.0.1:44083 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-13T00:32:15,827 WARN [BP-487134543-172.17.0.2-1731457896957 heartbeating to localhost/127.0.0.1:44083 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-487134543-172.17.0.2-1731457896957 (Datanode Uuid 5b2e41f1-2960-4af4-a5e7-9d9852fd9250) service to localhost/127.0.0.1:44083 2024-11-13T00:32:15,827 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-13T00:32:15,827 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-13T00:32:15,827 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ff82c900-794d-15f5-f48c-9e4d638c09e2/cluster_d08ed4d9-c3e3-0405-2e05-46334e65a045/data/data1/current/BP-487134543-172.17.0.2-1731457896957 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-13T00:32:15,828 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ff82c900-794d-15f5-f48c-9e4d638c09e2/cluster_d08ed4d9-c3e3-0405-2e05-46334e65a045/data/data2/current/BP-487134543-172.17.0.2-1731457896957 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-13T00:32:15,828 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-13T00:32:15,833 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@32d04f87{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-13T00:32:15,834 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@22c7a053{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-13T00:32:15,834 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-13T00:32:15,834 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5410bc8b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-13T00:32:15,834 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3712ee8f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ff82c900-794d-15f5-f48c-9e4d638c09e2/hadoop.log.dir/,STOPPED} 2024-11-13T00:32:15,841 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-13T00:32:15,868 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-13T00:32:15,876 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRolling Thread=233 (was 209) Potentially hanging thread: nioEventLoopGroup-38-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-38-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) 
app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (599498808) connection to localhost/127.0.0.1:44083 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-39-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-14-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-40-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-39-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-40-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-38-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (599498808) connection to localhost/127.0.0.1:44083 from jenkins.hfs.6 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-1 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:44083 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-39-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-14-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.6@localhost:44083 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-40-2 java.base@17.0.11/java.lang.Thread.sleep(Native 
Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-41-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:44083 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:44083 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:44083 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-41-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-14-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-41-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (599498808) connection to localhost/127.0.0.1:44083 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) - Thread LEAK? -, OpenFileDescriptor=515 (was 483) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=61 (was 94), ProcessCount=11 (was 11), AvailableMemoryMB=1190 (was 1243) 2024-11-13T00:32:15,884 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnNothingWritten Thread=233, OpenFileDescriptor=515, MaxFileDescriptor=1048576, SystemLoadAverage=61, ProcessCount=11, AvailableMemoryMB=1190 2024-11-13T00:32:15,884 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-13T00:32:15,884 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ff82c900-794d-15f5-f48c-9e4d638c09e2/hadoop.log.dir so I do NOT create it in target/test-data/02f7f4c2-a936-fd05-68fb-73b3cabee09d 2024-11-13T00:32:15,884 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ff82c900-794d-15f5-f48c-9e4d638c09e2/hadoop.tmp.dir so I do NOT create it in target/test-data/02f7f4c2-a936-fd05-68fb-73b3cabee09d 2024-11-13T00:32:15,884 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/02f7f4c2-a936-fd05-68fb-73b3cabee09d/cluster_adafe1e2-df5a-bfb0-580c-ccd8a855f81a, deleteOnExit=true 2024-11-13T00:32:15,884 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 
2024-11-13T00:32:15,885 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/02f7f4c2-a936-fd05-68fb-73b3cabee09d/test.cache.data in system properties and HBase conf 2024-11-13T00:32:15,885 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/02f7f4c2-a936-fd05-68fb-73b3cabee09d/hadoop.tmp.dir in system properties and HBase conf 2024-11-13T00:32:15,885 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/02f7f4c2-a936-fd05-68fb-73b3cabee09d/hadoop.log.dir in system properties and HBase conf 2024-11-13T00:32:15,885 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/02f7f4c2-a936-fd05-68fb-73b3cabee09d/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-13T00:32:15,885 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/02f7f4c2-a936-fd05-68fb-73b3cabee09d/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-13T00:32:15,885 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-13T00:32:15,885 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering 2024-11-13T00:32:15,885 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/02f7f4c2-a936-fd05-68fb-73b3cabee09d/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-13T00:32:15,885 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/02f7f4c2-a936-fd05-68fb-73b3cabee09d/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-13T00:32:15,885 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/02f7f4c2-a936-fd05-68fb-73b3cabee09d/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-13T00:32:15,885 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/02f7f4c2-a936-fd05-68fb-73b3cabee09d/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-13T00:32:15,886 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/02f7f4c2-a936-fd05-68fb-73b3cabee09d/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-13T00:32:15,886 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting 
yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/02f7f4c2-a936-fd05-68fb-73b3cabee09d/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-13T00:32:15,886 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/02f7f4c2-a936-fd05-68fb-73b3cabee09d/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-13T00:32:15,886 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/02f7f4c2-a936-fd05-68fb-73b3cabee09d/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-13T00:32:15,886 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/02f7f4c2-a936-fd05-68fb-73b3cabee09d/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-13T00:32:15,886 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/02f7f4c2-a936-fd05-68fb-73b3cabee09d/nfs.dump.dir in system properties and HBase conf 2024-11-13T00:32:15,886 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/02f7f4c2-a936-fd05-68fb-73b3cabee09d/java.io.tmpdir in system properties and HBase conf 2024-11-13T00:32:15,886 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/02f7f4c2-a936-fd05-68fb-73b3cabee09d/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-13T00:32:15,886 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/02f7f4c2-a936-fd05-68fb-73b3cabee09d/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-13T00:32:15,886 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/02f7f4c2-a936-fd05-68fb-73b3cabee09d/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-13T00:32:15,899 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-13T00:32:15,952 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-13T00:32:15,955 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-13T00:32:15,957 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-13T00:32:15,957 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-13T00:32:15,957 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-13T00:32:15,959 INFO [regionserver/cb28580903fb:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-13T00:32:15,959 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-13T00:32:15,960 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6e10390{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/02f7f4c2-a936-fd05-68fb-73b3cabee09d/hadoop.log.dir/,AVAILABLE} 2024-11-13T00:32:15,960 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1a3de627{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-13T00:32:16,004 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,41233,1731457759323/cb28580903fb%2C41233%2C1731457759323.meta.1731457760430.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:32:16,004 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,35015,1731457760593/cb28580903fb%2C35015%2C1731457760593.1731457760827 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-13T00:32:16,073 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@6ef093cd{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/02f7f4c2-a936-fd05-68fb-73b3cabee09d/java.io.tmpdir/jetty-localhost-35529-hadoop-hdfs-3_4_1-tests_jar-_-any-13371503919505544072/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-13T00:32:16,074 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3636d357{HTTP/1.1, (http/1.1)}{localhost:35529} 2024-11-13T00:32:16,074 INFO [Time-limited test {}] server.Server(415): Started @281922ms 2024-11-13T00:32:16,086 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-13T00:32:16,138 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-13T00:32:16,140 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-13T00:32:16,141 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-13T00:32:16,141 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-13T00:32:16,141 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-13T00:32:16,141 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@dc5c84{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/02f7f4c2-a936-fd05-68fb-73b3cabee09d/hadoop.log.dir/,AVAILABLE} 2024-11-13T00:32:16,142 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2c219139{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-13T00:32:16,254 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3cc2275a{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/02f7f4c2-a936-fd05-68fb-73b3cabee09d/java.io.tmpdir/jetty-localhost-43011-hadoop-hdfs-3_4_1-tests_jar-_-any-1605878896722618636/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-13T00:32:16,255 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@280c54a4{HTTP/1.1, (http/1.1)}{localhost:43011} 2024-11-13T00:32:16,255 INFO [Time-limited test {}] server.Server(415): Started @282103ms 2024-11-13T00:32:16,256 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
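The two InvocationTargetException stack traces above come from RecoverLeaseFSUtils probing DistributedFileSystem.isFileClosed through reflection while the test's DFSClient has already been closed, so the underlying IOException ("Filesystem closed") surfaces wrapped in the reflective call. A minimal, illustrative sketch of that probe pattern (not the HBase source itself):

    import java.io.IOException;
    import java.lang.reflect.InvocationTargetException;
    import java.lang.reflect.Method;

    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    // Illustrative sketch only: probe isFileClosed reflectively, mirroring the call
    // chain in the stack traces above. The method lives on DistributedFileSystem;
    // calling it through reflection keeps the caller compilable against FileSystem alone.
    public final class IsFileClosedProbe {
      static Boolean probe(FileSystem fs, Path p) {
        try {
          Method m = fs.getClass().getMethod("isFileClosed", Path.class);
          return (Boolean) m.invoke(fs, p);
        } catch (NoSuchMethodException e) {
          return null; // this filesystem does not expose isFileClosed
        } catch (InvocationTargetException e) {
          // An already-closed DFSClient surfaces here as
          // "java.io.IOException: Filesystem closed", exactly as logged above.
          if (e.getCause() instanceof IOException) {
            return null; // treat as "unknown"; the WAL close path logs and moves on
          }
          throw new RuntimeException(e.getCause());
        } catch (IllegalAccessException e) {
          throw new RuntimeException(e);
        }
      }
    }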
2024-11-13T00:32:16,283 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-13T00:32:16,285 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-13T00:32:16,286 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-13T00:32:16,286 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-13T00:32:16,286 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-13T00:32:16,286 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5f138979{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/02f7f4c2-a936-fd05-68fb-73b3cabee09d/hadoop.log.dir/,AVAILABLE} 2024-11-13T00:32:16,287 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@189ac7ed{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-13T00:32:16,347 WARN [Thread-2489 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/02f7f4c2-a936-fd05-68fb-73b3cabee09d/cluster_adafe1e2-df5a-bfb0-580c-ccd8a855f81a/data/data1/current/BP-1032376881-172.17.0.2-1731457935904/current, will proceed with Du for space computation calculation, 2024-11-13T00:32:16,347 WARN [Thread-2490 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/02f7f4c2-a936-fd05-68fb-73b3cabee09d/cluster_adafe1e2-df5a-bfb0-580c-ccd8a855f81a/data/data2/current/BP-1032376881-172.17.0.2-1731457935904/current, will proceed with Du for space computation calculation, 2024-11-13T00:32:16,363 WARN [Thread-2468 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-13T00:32:16,366 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x3543fc51e6ae478b with lease ID 0xadb6bccd0f3c4b12: Processing first storage report for DS-4e015aea-d62b-4282-8635-560d4612b5f5 from datanode DatanodeRegistration(127.0.0.1:38253, datanodeUuid=9569a5a5-c5ad-4ce1-b3ab-06991b3ea235, infoPort=46597, infoSecurePort=0, ipcPort=34175, storageInfo=lv=-57;cid=testClusterID;nsid=715834032;c=1731457935904) 2024-11-13T00:32:16,366 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x3543fc51e6ae478b with lease ID 0xadb6bccd0f3c4b12: from storage DS-4e015aea-d62b-4282-8635-560d4612b5f5 node DatanodeRegistration(127.0.0.1:38253, datanodeUuid=9569a5a5-c5ad-4ce1-b3ab-06991b3ea235, infoPort=46597, infoSecurePort=0, ipcPort=34175, storageInfo=lv=-57;cid=testClusterID;nsid=715834032;c=1731457935904), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-13T00:32:16,366 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x3543fc51e6ae478b with lease ID 0xadb6bccd0f3c4b12: Processing first storage report for DS-d38def3a-1aa8-4b63-a7d2-54c3f63e37d7 from datanode DatanodeRegistration(127.0.0.1:38253, datanodeUuid=9569a5a5-c5ad-4ce1-b3ab-06991b3ea235, infoPort=46597, infoSecurePort=0, ipcPort=34175, storageInfo=lv=-57;cid=testClusterID;nsid=715834032;c=1731457935904) 2024-11-13T00:32:16,366 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x3543fc51e6ae478b with lease ID 0xadb6bccd0f3c4b12: from storage DS-d38def3a-1aa8-4b63-a7d2-54c3f63e37d7 node DatanodeRegistration(127.0.0.1:38253, datanodeUuid=9569a5a5-c5ad-4ce1-b3ab-06991b3ea235, infoPort=46597, infoSecurePort=0, ipcPort=34175, storageInfo=lv=-57;cid=testClusterID;nsid=715834032;c=1731457935904), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-13T00:32:16,402 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@23bea8a3{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/02f7f4c2-a936-fd05-68fb-73b3cabee09d/java.io.tmpdir/jetty-localhost-42205-hadoop-hdfs-3_4_1-tests_jar-_-any-14427657832872052181/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-13T00:32:16,402 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@36271130{HTTP/1.1, (http/1.1)}{localhost:42205} 2024-11-13T00:32:16,403 INFO [Time-limited test {}] server.Server(415): Started @282251ms 2024-11-13T00:32:16,404 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
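The NameNode and the two DataNodes whose block reports appear above are brought up by the testing utility as a mini HDFS cluster. A hedged sketch of the underlying Hadoop test API (MiniDFSCluster, from the hadoop-hdfs test jar); HBaseTestingUtil wraps this and picks random ports, so the exact wiring in the run above differs:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.hdfs.MiniDFSCluster;

    // Sketch: a two-DataNode mini HDFS cluster, matching the two DatanodeRegistrations
    // reported above. Requires the hadoop-hdfs test artifact on the classpath.
    public class MiniHdfsExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
            .numDataNodes(2)   // two DataNodes, as in the log above
            .build();
        try {
          cluster.waitActive();                     // wait until block reports are processed
          FileSystem fs = cluster.getFileSystem();  // hdfs://localhost:<random NN port>
          System.out.println("NameNode URI: " + fs.getUri());
        } finally {
          cluster.shutdown();
        }
      }
    }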
2024-11-13T00:32:16,500 WARN [Thread-2515 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/02f7f4c2-a936-fd05-68fb-73b3cabee09d/cluster_adafe1e2-df5a-bfb0-580c-ccd8a855f81a/data/data3/current/BP-1032376881-172.17.0.2-1731457935904/current, will proceed with Du for space computation calculation, 2024-11-13T00:32:16,501 WARN [Thread-2516 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/02f7f4c2-a936-fd05-68fb-73b3cabee09d/cluster_adafe1e2-df5a-bfb0-580c-ccd8a855f81a/data/data4/current/BP-1032376881-172.17.0.2-1731457935904/current, will proceed with Du for space computation calculation, 2024-11-13T00:32:16,519 WARN [Thread-2504 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-13T00:32:16,521 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x40197f1dc5455932 with lease ID 0xadb6bccd0f3c4b13: Processing first storage report for DS-f4b1cb77-fec5-4c1d-bad7-db32a13824e4 from datanode DatanodeRegistration(127.0.0.1:33285, datanodeUuid=90873097-09d3-49a3-9f47-8fabd116f227, infoPort=38535, infoSecurePort=0, ipcPort=36399, storageInfo=lv=-57;cid=testClusterID;nsid=715834032;c=1731457935904) 2024-11-13T00:32:16,521 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x40197f1dc5455932 with lease ID 0xadb6bccd0f3c4b13: from storage DS-f4b1cb77-fec5-4c1d-bad7-db32a13824e4 node DatanodeRegistration(127.0.0.1:33285, datanodeUuid=90873097-09d3-49a3-9f47-8fabd116f227, infoPort=38535, infoSecurePort=0, ipcPort=36399, storageInfo=lv=-57;cid=testClusterID;nsid=715834032;c=1731457935904), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-13T00:32:16,521 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x40197f1dc5455932 with lease ID 0xadb6bccd0f3c4b13: Processing first storage report for DS-accd3d91-ff47-4f7c-8302-bcff5af7ac63 from datanode DatanodeRegistration(127.0.0.1:33285, datanodeUuid=90873097-09d3-49a3-9f47-8fabd116f227, infoPort=38535, infoSecurePort=0, ipcPort=36399, storageInfo=lv=-57;cid=testClusterID;nsid=715834032;c=1731457935904) 2024-11-13T00:32:16,521 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x40197f1dc5455932 with lease ID 0xadb6bccd0f3c4b13: from storage DS-accd3d91-ff47-4f7c-8302-bcff5af7ac63 node DatanodeRegistration(127.0.0.1:33285, datanodeUuid=90873097-09d3-49a3-9f47-8fabd116f227, infoPort=38535, infoSecurePort=0, ipcPort=36399, storageInfo=lv=-57;cid=testClusterID;nsid=715834032;c=1731457935904), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-13T00:32:16,526 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/02f7f4c2-a936-fd05-68fb-73b3cabee09d 2024-11-13T00:32:16,528 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/02f7f4c2-a936-fd05-68fb-73b3cabee09d/cluster_adafe1e2-df5a-bfb0-580c-ccd8a855f81a/zookeeper_0, clientPort=57560, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/02f7f4c2-a936-fd05-68fb-73b3cabee09d/cluster_adafe1e2-df5a-bfb0-580c-ccd8a855f81a/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/02f7f4c2-a936-fd05-68fb-73b3cabee09d/cluster_adafe1e2-df5a-bfb0-580c-ccd8a855f81a/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-13T00:32:16,529 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=57560 2024-11-13T00:32:16,529 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T00:32:16,530 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T00:32:16,538 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33285 is added to blk_1073741825_1001 (size=7) 2024-11-13T00:32:16,539 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38253 is added to blk_1073741825_1001 (size=7) 2024-11-13T00:32:16,540 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:43979/user/jenkins/test-data/54f258ed-efb1-c29e-82a2-56c5681dca0f with version=8 2024-11-13T00:32:16,540 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:36657/user/jenkins/test-data/90fad67b-44a6-07b6-9c5c-bf568afb6a9a/hbase-staging 2024-11-13T00:32:16,542 INFO [Time-limited test {}] client.ConnectionUtils(128): master/cb28580903fb:0 server-side Connection retries=45 2024-11-13T00:32:16,542 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-13T00:32:16,542 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-13T00:32:16,542 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-13T00:32:16,542 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-13T00:32:16,542 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-13T00:32:16,542 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-13T00:32:16,542 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-13T00:32:16,543 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:38917 2024-11-13T00:32:16,544 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:38917 connecting to ZooKeeper ensemble=127.0.0.1:57560 2024-11-13T00:32:16,550 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:389170x0, quorum=127.0.0.1:57560, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-13T00:32:16,551 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:38917-0x1010ebd571f0000 connected 2024-11-13T00:32:16,564 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T00:32:16,566 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T00:32:16,567 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:38917-0x1010ebd571f0000, quorum=127.0.0.1:57560, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-13T00:32:16,568 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:43979/user/jenkins/test-data/54f258ed-efb1-c29e-82a2-56c5681dca0f, hbase.cluster.distributed=false 2024-11-13T00:32:16,569 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:38917-0x1010ebd571f0000, quorum=127.0.0.1:57560, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-13T00:32:16,569 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=38917 2024-11-13T00:32:16,570 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=38917 2024-11-13T00:32:16,571 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=38917 2024-11-13T00:32:16,571 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=38917 2024-11-13T00:32:16,571 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=38917 2024-11-13T00:32:16,586 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/cb28580903fb:0 server-side Connection retries=45 2024-11-13T00:32:16,586 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-13T00:32:16,586 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-13T00:32:16,586 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-13T00:32:16,586 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-13T00:32:16,586 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-13T00:32:16,586 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-13T00:32:16,586 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-13T00:32:16,587 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:33487 2024-11-13T00:32:16,588 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:33487 connecting to ZooKeeper ensemble=127.0.0.1:57560 2024-11-13T00:32:16,588 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T00:32:16,590 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T00:32:16,593 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:334870x0, quorum=127.0.0.1:57560, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-13T00:32:16,593 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:33487-0x1010ebd571f0001 connected 2024-11-13T00:32:16,593 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33487-0x1010ebd571f0001, quorum=127.0.0.1:57560, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-13T00:32:16,594 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-13T00:32:16,594 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-13T00:32:16,595 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33487-0x1010ebd571f0001, quorum=127.0.0.1:57560, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-13T00:32:16,595 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33487-0x1010ebd571f0001, quorum=127.0.0.1:57560, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-13T00:32:16,596 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=33487 2024-11-13T00:32:16,596 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=33487 2024-11-13T00:32:16,596 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=33487 2024-11-13T00:32:16,596 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=33487 2024-11-13T00:32:16,597 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=33487 
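Both the master (RPC port 38917) and the regionserver (RPC port 33487) above register with the ZooKeeper ensemble at 127.0.0.1:57560. An illustrative client-side sketch of pointing at that ensemble with the standard public API; in a real test the quorum and client port are read from the testing utility rather than hard-coded, since they change on every run:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    // Sketch: connect a client to the mini cluster's ZooKeeper ensemble (values taken
    // from the log above; they are random per run).
    public class MiniClusterClient {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        conf.set("hbase.zookeeper.quorum", "127.0.0.1");
        conf.set("hbase.zookeeper.property.clientPort", "57560");
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
          System.out.println("Cluster ID: " + admin.getClusterMetrics().getClusterId());
        }
      }
    }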
2024-11-13T00:32:16,608 DEBUG [M:0;cb28580903fb:38917 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;cb28580903fb:38917 2024-11-13T00:32:16,608 INFO [master/cb28580903fb:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/cb28580903fb,38917,1731457936541 2024-11-13T00:32:16,610 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38917-0x1010ebd571f0000, quorum=127.0.0.1:57560, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-13T00:32:16,610 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33487-0x1010ebd571f0001, quorum=127.0.0.1:57560, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-13T00:32:16,610 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:38917-0x1010ebd571f0000, quorum=127.0.0.1:57560, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/cb28580903fb,38917,1731457936541 2024-11-13T00:32:16,611 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33487-0x1010ebd571f0001, quorum=127.0.0.1:57560, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-13T00:32:16,611 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33487-0x1010ebd571f0001, quorum=127.0.0.1:57560, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T00:32:16,611 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38917-0x1010ebd571f0000, quorum=127.0.0.1:57560, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T00:32:16,612 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:38917-0x1010ebd571f0000, quorum=127.0.0.1:57560, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-13T00:32:16,612 INFO [master/cb28580903fb:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/cb28580903fb,38917,1731457936541 from backup master directory 2024-11-13T00:32:16,614 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38917-0x1010ebd571f0000, quorum=127.0.0.1:57560, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/cb28580903fb,38917,1731457936541 2024-11-13T00:32:16,614 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38917-0x1010ebd571f0000, quorum=127.0.0.1:57560, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-13T00:32:16,614 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33487-0x1010ebd571f0001, quorum=127.0.0.1:57560, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-13T00:32:16,614 WARN [master/cb28580903fb:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-13T00:32:16,614 INFO [master/cb28580903fb:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=cb28580903fb,38917,1731457936541 2024-11-13T00:32:16,618 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:43979/user/jenkins/test-data/54f258ed-efb1-c29e-82a2-56c5681dca0f/hbase.id] with ID: 5a4dfb5d-0cc7-4092-b8ed-a8643e90a219 2024-11-13T00:32:16,618 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:43979/user/jenkins/test-data/54f258ed-efb1-c29e-82a2-56c5681dca0f/.tmp/hbase.id 2024-11-13T00:32:16,623 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38253 is added to blk_1073741826_1002 (size=42) 2024-11-13T00:32:16,623 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33285 is added to blk_1073741826_1002 (size=42) 2024-11-13T00:32:16,623 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:43979/user/jenkins/test-data/54f258ed-efb1-c29e-82a2-56c5681dca0f/.tmp/hbase.id]:[hdfs://localhost:43979/user/jenkins/test-data/54f258ed-efb1-c29e-82a2-56c5681dca0f/hbase.id] 2024-11-13T00:32:16,633 INFO [master/cb28580903fb:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T00:32:16,633 INFO [master/cb28580903fb:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-13T00:32:16,634 INFO [master/cb28580903fb:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
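The cluster ID file above is written to a temporary location first and then moved to its final path, so readers never observe a half-written hbase.id. A simplified sketch of that write-then-rename pattern (the actual FSUtils code differs in its details):

    import java.io.IOException;
    import java.nio.charset.StandardCharsets;

    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public final class AtomicPublishExample {
      // Write contents to a sibling .tmp path, then rename into place. Rename is atomic
      // within an HDFS filesystem, so concurrent readers see either nothing or the
      // finished file -- the property the cluster ID move above relies on.
      static void publish(FileSystem fs, Path target, String contents) throws IOException {
        Path tmp = new Path(target.getParent(), ".tmp/" + target.getName());
        try (FSDataOutputStream out = fs.create(tmp, true /* overwrite */)) {
          out.write(contents.getBytes(StandardCharsets.UTF_8));
        }
        if (!fs.rename(tmp, target)) {
          throw new IOException("Failed to move " + tmp + " to " + target);
        }
      }
    }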
2024-11-13T00:32:16,636 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38917-0x1010ebd571f0000, quorum=127.0.0.1:57560, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T00:32:16,636 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33487-0x1010ebd571f0001, quorum=127.0.0.1:57560, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T00:32:16,643 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33285 is added to blk_1073741827_1003 (size=196) 2024-11-13T00:32:16,644 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38253 is added to blk_1073741827_1003 (size=196) 2024-11-13T00:32:16,645 INFO [master/cb28580903fb:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-13T00:32:16,645 INFO [master/cb28580903fb:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-13T00:32:16,646 INFO [master/cb28580903fb:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-13T00:32:16,653 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33285 is added to blk_1073741828_1004 (size=1189) 2024-11-13T00:32:16,653 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38253 is added to blk_1073741828_1004 (size=1189) 2024-11-13T00:32:16,654 INFO [master/cb28580903fb:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:43979/user/jenkins/test-data/54f258ed-efb1-c29e-82a2-56c5681dca0f/MasterData/data/master/store 2024-11-13T00:32:16,658 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33285 is added to blk_1073741829_1005 (size=34) 2024-11-13T00:32:16,659 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38253 is added to blk_1073741829_1005 (size=34) 2024-11-13T00:32:16,659 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T00:32:16,659 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-13T00:32:16,659 INFO [master/cb28580903fb:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-13T00:32:16,659 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-13T00:32:16,659 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-13T00:32:16,659 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-13T00:32:16,659 INFO [master/cb28580903fb:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
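The master:store descriptor logged above defines four column families (info, proc, rs, state); the 'info' family uses VERSIONS=3, IN_MEMORY=true, ROW_INDEX_V1 encoding, a ROWCOL bloom filter and 8 KB blocks. For orientation, an equivalent descriptor expressed with the public builder API (the real one is assembled internally by MasterRegion; rs and state follow the same shape as proc):

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    // Illustration of the attributes printed in the log above, not the internal code path.
    public class MasterStoreDescriptorExample {
      public static TableDescriptor build() {
        return TableDescriptorBuilder.newBuilder(TableName.valueOf("master:store"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
                .setMaxVersions(3)
                .setInMemory(true)
                .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
                .setBloomFilterType(BloomType.ROWCOL)
                .setBlocksize(8 * 1024)
                .build())
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("proc")) // defaults: 1 version, 64 KB blocks
            .build();
      }
    }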
2024-11-13T00:32:16,660 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731457936659Disabling compacts and flushes for region at 1731457936659Disabling writes for close at 1731457936659Writing region close event to WAL at 1731457936659Closed at 1731457936659 2024-11-13T00:32:16,660 WARN [master/cb28580903fb:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:43979/user/jenkins/test-data/54f258ed-efb1-c29e-82a2-56c5681dca0f/MasterData/data/master/store/.initializing 2024-11-13T00:32:16,660 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:43979/user/jenkins/test-data/54f258ed-efb1-c29e-82a2-56c5681dca0f/MasterData/WALs/cb28580903fb,38917,1731457936541 2024-11-13T00:32:16,662 INFO [master/cb28580903fb:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=cb28580903fb%2C38917%2C1731457936541, suffix=, logDir=hdfs://localhost:43979/user/jenkins/test-data/54f258ed-efb1-c29e-82a2-56c5681dca0f/MasterData/WALs/cb28580903fb,38917,1731457936541, archiveDir=hdfs://localhost:43979/user/jenkins/test-data/54f258ed-efb1-c29e-82a2-56c5681dca0f/MasterData/oldWALs, maxLogs=10 2024-11-13T00:32:16,663 INFO [master/cb28580903fb:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor cb28580903fb%2C38917%2C1731457936541.1731457936663 2024-11-13T00:32:16,667 INFO [master/cb28580903fb:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/54f258ed-efb1-c29e-82a2-56c5681dca0f/MasterData/WALs/cb28580903fb,38917,1731457936541/cb28580903fb%2C38917%2C1731457936541.1731457936663 2024-11-13T00:32:16,668 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38535:38535),(127.0.0.1/127.0.0.1:46597:46597)] 2024-11-13T00:32:16,668 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-13T00:32:16,668 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T00:32:16,668 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-13T00:32:16,668 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-13T00:32:16,670 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-13T00:32:16,671 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-13T00:32:16,671 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T00:32:16,671 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T00:32:16,672 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-13T00:32:16,672 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-13T00:32:16,673 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T00:32:16,673 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T00:32:16,673 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-13T00:32:16,674 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-13T00:32:16,674 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T00:32:16,674 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T00:32:16,674 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-13T00:32:16,675 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-13T00:32:16,675 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T00:32:16,675 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-13T00:32:16,676 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-13T00:32:16,676 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43979/user/jenkins/test-data/54f258ed-efb1-c29e-82a2-56c5681dca0f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-13T00:32:16,676 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43979/user/jenkins/test-data/54f258ed-efb1-c29e-82a2-56c5681dca0f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-13T00:32:16,677 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-13T00:32:16,677 DEBUG [master/cb28580903fb:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-13T00:32:16,678 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-13T00:32:16,679 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-13T00:32:16,680 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43979/user/jenkins/test-data/54f258ed-efb1-c29e-82a2-56c5681dca0f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-13T00:32:16,681 INFO [master/cb28580903fb:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=713256, jitterRate=-0.09304864704608917}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-13T00:32:16,681 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731457936669Initializing all the Stores at 1731457936669Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731457936669Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731457936669Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731457936669Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731457936669Cleaning up temporary data from old regions at 1731457936677 (+8 ms)Region opened successfully at 1731457936681 (+4 ms) 2024-11-13T00:32:16,681 INFO [master/cb28580903fb:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-13T00:32:16,684 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1699476a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, 
writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=cb28580903fb/172.17.0.2:0 2024-11-13T00:32:16,685 INFO [master/cb28580903fb:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-13T00:32:16,685 INFO [master/cb28580903fb:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-13T00:32:16,685 INFO [master/cb28580903fb:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-13T00:32:16,685 INFO [master/cb28580903fb:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-13T00:32:16,685 INFO [master/cb28580903fb:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-13T00:32:16,685 INFO [master/cb28580903fb:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-13T00:32:16,686 INFO [master/cb28580903fb:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-13T00:32:16,687 INFO [master/cb28580903fb:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-13T00:32:16,688 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38917-0x1010ebd571f0000, quorum=127.0.0.1:57560, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-13T00:32:16,689 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-13T00:32:16,689 INFO [master/cb28580903fb:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-13T00:32:16,690 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38917-0x1010ebd571f0000, quorum=127.0.0.1:57560, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-13T00:32:16,691 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-13T00:32:16,691 INFO [master/cb28580903fb:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-13T00:32:16,692 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38917-0x1010ebd571f0000, quorum=127.0.0.1:57560, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-13T00:32:16,694 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-13T00:32:16,695 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38917-0x1010ebd571f0000, quorum=127.0.0.1:57560, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-13T00:32:16,696 DEBUG 
[master/cb28580903fb:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-13T00:32:16,698 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38917-0x1010ebd571f0000, quorum=127.0.0.1:57560, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-13T00:32:16,699 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-13T00:32:16,700 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38917-0x1010ebd571f0000, quorum=127.0.0.1:57560, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-13T00:32:16,700 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33487-0x1010ebd571f0001, quorum=127.0.0.1:57560, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-13T00:32:16,700 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38917-0x1010ebd571f0000, quorum=127.0.0.1:57560, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T00:32:16,700 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33487-0x1010ebd571f0001, quorum=127.0.0.1:57560, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T00:32:16,701 INFO [master/cb28580903fb:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=cb28580903fb,38917,1731457936541, sessionid=0x1010ebd571f0000, setting cluster-up flag (Was=false) 2024-11-13T00:32:16,704 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38917-0x1010ebd571f0000, quorum=127.0.0.1:57560, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T00:32:16,704 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33487-0x1010ebd571f0001, quorum=127.0.0.1:57560, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T00:32:16,713 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-13T00:32:16,714 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=cb28580903fb,38917,1731457936541 2024-11-13T00:32:16,717 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38917-0x1010ebd571f0000, quorum=127.0.0.1:57560, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T00:32:16,717 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33487-0x1010ebd571f0001, quorum=127.0.0.1:57560, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T00:32:16,722 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-13T00:32:16,723 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=cb28580903fb,38917,1731457936541 2024-11-13T00:32:16,724 INFO [master/cb28580903fb:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:43979/user/jenkins/test-data/54f258ed-efb1-c29e-82a2-56c5681dca0f/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-13T00:32:16,725 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-13T00:32:16,725 INFO [master/cb28580903fb:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-13T00:32:16,725 INFO [master/cb28580903fb:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-13T00:32:16,726 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: cb28580903fb,38917,1731457936541 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-13T00:32:16,727 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/cb28580903fb:0, corePoolSize=5, maxPoolSize=5 2024-11-13T00:32:16,727 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/cb28580903fb:0, corePoolSize=5, maxPoolSize=5 2024-11-13T00:32:16,727 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/cb28580903fb:0, corePoolSize=5, maxPoolSize=5 2024-11-13T00:32:16,727 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/cb28580903fb:0, corePoolSize=5, maxPoolSize=5 2024-11-13T00:32:16,727 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/cb28580903fb:0, corePoolSize=10, maxPoolSize=10 2024-11-13T00:32:16,727 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/cb28580903fb:0, corePoolSize=1, maxPoolSize=1 2024-11-13T00:32:16,727 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/cb28580903fb:0, corePoolSize=2, maxPoolSize=2 2024-11-13T00:32:16,727 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/cb28580903fb:0, corePoolSize=1, 
maxPoolSize=1 2024-11-13T00:32:16,727 INFO [master/cb28580903fb:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731457966727 2024-11-13T00:32:16,728 INFO [master/cb28580903fb:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-13T00:32:16,728 INFO [master/cb28580903fb:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-13T00:32:16,728 INFO [master/cb28580903fb:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-13T00:32:16,728 INFO [master/cb28580903fb:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-13T00:32:16,728 INFO [master/cb28580903fb:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-13T00:32:16,728 INFO [master/cb28580903fb:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-13T00:32:16,728 INFO [master/cb28580903fb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-13T00:32:16,728 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-13T00:32:16,728 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-13T00:32:16,728 INFO [master/cb28580903fb:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-13T00:32:16,728 INFO [master/cb28580903fb:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-13T00:32:16,728 INFO [master/cb28580903fb:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-13T00:32:16,729 INFO [master/cb28580903fb:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-13T00:32:16,729 INFO [master/cb28580903fb:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-13T00:32:16,729 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/cb28580903fb:0:becomeActiveMaster-HFileCleaner.large.0-1731457936729,5,FailOnTimeoutGroup] 2024-11-13T00:32:16,729 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/cb28580903fb:0:becomeActiveMaster-HFileCleaner.small.0-1731457936729,5,FailOnTimeoutGroup] 2024-11-13T00:32:16,729 INFO [master/cb28580903fb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 
2024-11-13T00:32:16,729 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T00:32:16,729 INFO [master/cb28580903fb:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-13T00:32:16,729 INFO [master/cb28580903fb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-13T00:32:16,729 INFO [master/cb28580903fb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-13T00:32:16,729 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-13T00:32:16,734 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38253 is added to blk_1073741831_1007 (size=1321) 2024-11-13T00:32:16,735 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33285 is added to blk_1073741831_1007 (size=1321) 2024-11-13T00:32:16,735 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:43979/user/jenkins/test-data/54f258ed-efb1-c29e-82a2-56c5681dca0f/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-13T00:32:16,735 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS 
=> 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:43979/user/jenkins/test-data/54f258ed-efb1-c29e-82a2-56c5681dca0f 2024-11-13T00:32:16,738 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/MasterData/WALs/cb28580903fb,32917,1731457759225/cb28580903fb%2C32917%2C1731457759225.1731457759520 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-13T00:32:16,740 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38253 is added to blk_1073741832_1008 (size=32) 2024-11-13T00:32:16,741 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33285 is added to blk_1073741832_1008 (size=32) 2024-11-13T00:32:16,741 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T00:32:16,742 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-13T00:32:16,743 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-13T00:32:16,743 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T00:32:16,743 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T00:32:16,744 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-13T00:32:16,745 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-13T00:32:16,745 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T00:32:16,745 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T00:32:16,745 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-13T00:32:16,746 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-13T00:32:16,746 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T00:32:16,746 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T00:32:16,746 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-13T00:32:16,747 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-13T00:32:16,747 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T00:32:16,748 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T00:32:16,748 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 
1588230740 2024-11-13T00:32:16,748 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43979/user/jenkins/test-data/54f258ed-efb1-c29e-82a2-56c5681dca0f/data/hbase/meta/1588230740 2024-11-13T00:32:16,749 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43979/user/jenkins/test-data/54f258ed-efb1-c29e-82a2-56c5681dca0f/data/hbase/meta/1588230740 2024-11-13T00:32:16,749 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-13T00:32:16,749 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-13T00:32:16,750 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-13T00:32:16,751 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-13T00:32:16,752 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43979/user/jenkins/test-data/54f258ed-efb1-c29e-82a2-56c5681dca0f/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-13T00:32:16,752 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=818719, jitterRate=0.04105605185031891}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-13T00:32:16,753 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731457936741Initializing all the Stores at 1731457936741Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731457936742 (+1 ms)Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731457936742Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731457936742Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731457936742Cleaning up temporary data from old regions at 1731457936749 (+7 ms)Region opened successfully at 1731457936753 (+4 ms) 2024-11-13T00:32:16,753 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 
1588230740, disabling compactions & flushes 2024-11-13T00:32:16,753 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-13T00:32:16,753 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-13T00:32:16,753 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-13T00:32:16,753 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-13T00:32:16,753 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-13T00:32:16,753 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731457936753Disabling compacts and flushes for region at 1731457936753Disabling writes for close at 1731457936753Writing region close event to WAL at 1731457936753Closed at 1731457936753 2024-11-13T00:32:16,754 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-13T00:32:16,755 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-13T00:32:16,755 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-13T00:32:16,756 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-13T00:32:16,756 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-13T00:32:16,798 INFO [RS:0;cb28580903fb:33487 {}] regionserver.HRegionServer(746): ClusterId : 5a4dfb5d-0cc7-4092-b8ed-a8643e90a219 2024-11-13T00:32:16,798 DEBUG [RS:0;cb28580903fb:33487 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-13T00:32:16,800 DEBUG [RS:0;cb28580903fb:33487 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-13T00:32:16,800 DEBUG [RS:0;cb28580903fb:33487 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-13T00:32:16,802 DEBUG [RS:0;cb28580903fb:33487 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-13T00:32:16,802 DEBUG [RS:0;cb28580903fb:33487 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4b01dc8c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=cb28580903fb/172.17.0.2:0 2024-11-13T00:32:16,814 DEBUG [RS:0;cb28580903fb:33487 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;cb28580903fb:33487 2024-11-13T00:32:16,814 INFO [RS:0;cb28580903fb:33487 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-13T00:32:16,814 INFO 
[RS:0;cb28580903fb:33487 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-13T00:32:16,814 DEBUG [RS:0;cb28580903fb:33487 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-13T00:32:16,815 INFO [RS:0;cb28580903fb:33487 {}] regionserver.HRegionServer(2659): reportForDuty to master=cb28580903fb,38917,1731457936541 with port=33487, startcode=1731457936586 2024-11-13T00:32:16,815 DEBUG [RS:0;cb28580903fb:33487 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-13T00:32:16,817 INFO [HMaster-EventLoopGroup-16-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59657, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.7 (auth:SIMPLE), service=RegionServerStatusService 2024-11-13T00:32:16,818 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38917 {}] master.ServerManager(363): Checking decommissioned status of RegionServer cb28580903fb,33487,1731457936586 2024-11-13T00:32:16,818 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38917 {}] master.ServerManager(517): Registering regionserver=cb28580903fb,33487,1731457936586 2024-11-13T00:32:16,819 DEBUG [RS:0;cb28580903fb:33487 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:43979/user/jenkins/test-data/54f258ed-efb1-c29e-82a2-56c5681dca0f 2024-11-13T00:32:16,819 DEBUG [RS:0;cb28580903fb:33487 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:43979 2024-11-13T00:32:16,819 DEBUG [RS:0;cb28580903fb:33487 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-13T00:32:16,821 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38917-0x1010ebd571f0000, quorum=127.0.0.1:57560, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-13T00:32:16,822 DEBUG [RS:0;cb28580903fb:33487 {}] zookeeper.ZKUtil(111): regionserver:33487-0x1010ebd571f0001, quorum=127.0.0.1:57560, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/cb28580903fb,33487,1731457936586 2024-11-13T00:32:16,822 WARN [RS:0;cb28580903fb:33487 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-13T00:32:16,822 INFO [RS:0;cb28580903fb:33487 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-13T00:32:16,822 DEBUG [RS:0;cb28580903fb:33487 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:43979/user/jenkins/test-data/54f258ed-efb1-c29e-82a2-56c5681dca0f/WALs/cb28580903fb,33487,1731457936586 2024-11-13T00:32:16,822 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [cb28580903fb,33487,1731457936586] 2024-11-13T00:32:16,825 INFO [RS:0;cb28580903fb:33487 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-13T00:32:16,826 INFO [RS:0;cb28580903fb:33487 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-13T00:32:16,827 INFO [RS:0;cb28580903fb:33487 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-13T00:32:16,827 INFO [RS:0;cb28580903fb:33487 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-13T00:32:16,827 INFO [RS:0;cb28580903fb:33487 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-13T00:32:16,827 INFO [RS:0;cb28580903fb:33487 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-13T00:32:16,827 INFO [RS:0;cb28580903fb:33487 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-13T00:32:16,828 DEBUG [RS:0;cb28580903fb:33487 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/cb28580903fb:0, corePoolSize=1, maxPoolSize=1 2024-11-13T00:32:16,828 DEBUG [RS:0;cb28580903fb:33487 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/cb28580903fb:0, corePoolSize=1, maxPoolSize=1 2024-11-13T00:32:16,828 DEBUG [RS:0;cb28580903fb:33487 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/cb28580903fb:0, corePoolSize=1, maxPoolSize=1 2024-11-13T00:32:16,828 DEBUG [RS:0;cb28580903fb:33487 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/cb28580903fb:0, corePoolSize=1, maxPoolSize=1 2024-11-13T00:32:16,828 DEBUG [RS:0;cb28580903fb:33487 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/cb28580903fb:0, corePoolSize=1, maxPoolSize=1 2024-11-13T00:32:16,828 DEBUG [RS:0;cb28580903fb:33487 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/cb28580903fb:0, corePoolSize=2, maxPoolSize=2 2024-11-13T00:32:16,828 DEBUG [RS:0;cb28580903fb:33487 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/cb28580903fb:0, corePoolSize=1, maxPoolSize=1 2024-11-13T00:32:16,828 DEBUG [RS:0;cb28580903fb:33487 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/cb28580903fb:0, corePoolSize=1, maxPoolSize=1 2024-11-13T00:32:16,828 DEBUG [RS:0;cb28580903fb:33487 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/cb28580903fb:0, corePoolSize=1, 
maxPoolSize=1 2024-11-13T00:32:16,828 DEBUG [RS:0;cb28580903fb:33487 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/cb28580903fb:0, corePoolSize=1, maxPoolSize=1 2024-11-13T00:32:16,828 DEBUG [RS:0;cb28580903fb:33487 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/cb28580903fb:0, corePoolSize=1, maxPoolSize=1 2024-11-13T00:32:16,828 DEBUG [RS:0;cb28580903fb:33487 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/cb28580903fb:0, corePoolSize=1, maxPoolSize=1 2024-11-13T00:32:16,828 DEBUG [RS:0;cb28580903fb:33487 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/cb28580903fb:0, corePoolSize=3, maxPoolSize=3 2024-11-13T00:32:16,828 DEBUG [RS:0;cb28580903fb:33487 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/cb28580903fb:0, corePoolSize=3, maxPoolSize=3 2024-11-13T00:32:16,828 INFO [RS:0;cb28580903fb:33487 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-13T00:32:16,828 INFO [RS:0;cb28580903fb:33487 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-13T00:32:16,828 INFO [RS:0;cb28580903fb:33487 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-13T00:32:16,828 INFO [RS:0;cb28580903fb:33487 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-13T00:32:16,828 INFO [RS:0;cb28580903fb:33487 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-13T00:32:16,828 INFO [RS:0;cb28580903fb:33487 {}] hbase.ChoreService(168): Chore ScheduledChore name=cb28580903fb,33487,1731457936586-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-13T00:32:16,843 INFO [RS:0;cb28580903fb:33487 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-13T00:32:16,843 INFO [RS:0;cb28580903fb:33487 {}] hbase.ChoreService(168): Chore ScheduledChore name=cb28580903fb,33487,1731457936586-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-13T00:32:16,843 INFO [RS:0;cb28580903fb:33487 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-13T00:32:16,843 INFO [RS:0;cb28580903fb:33487 {}] regionserver.Replication(171): cb28580903fb,33487,1731457936586 started 2024-11-13T00:32:16,857 INFO [RS:0;cb28580903fb:33487 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-11-13T00:32:16,857 INFO [RS:0;cb28580903fb:33487 {}] regionserver.HRegionServer(1482): Serving as cb28580903fb,33487,1731457936586, RpcServer on cb28580903fb/172.17.0.2:33487, sessionid=0x1010ebd571f0001 2024-11-13T00:32:16,857 DEBUG [RS:0;cb28580903fb:33487 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-13T00:32:16,857 DEBUG [RS:0;cb28580903fb:33487 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager cb28580903fb,33487,1731457936586 2024-11-13T00:32:16,857 DEBUG [RS:0;cb28580903fb:33487 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'cb28580903fb,33487,1731457936586' 2024-11-13T00:32:16,857 DEBUG [RS:0;cb28580903fb:33487 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-13T00:32:16,857 DEBUG [RS:0;cb28580903fb:33487 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-13T00:32:16,858 DEBUG [RS:0;cb28580903fb:33487 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-13T00:32:16,858 DEBUG [RS:0;cb28580903fb:33487 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-13T00:32:16,858 DEBUG [RS:0;cb28580903fb:33487 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager cb28580903fb,33487,1731457936586 2024-11-13T00:32:16,858 DEBUG [RS:0;cb28580903fb:33487 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'cb28580903fb,33487,1731457936586' 2024-11-13T00:32:16,858 DEBUG [RS:0;cb28580903fb:33487 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-13T00:32:16,858 DEBUG [RS:0;cb28580903fb:33487 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-13T00:32:16,858 DEBUG [RS:0;cb28580903fb:33487 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-13T00:32:16,858 INFO [RS:0;cb28580903fb:33487 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-13T00:32:16,858 INFO [RS:0;cb28580903fb:33487 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-13T00:32:16,907 WARN [cb28580903fb:38917 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 
2024-11-13T00:32:16,960 INFO [RS:0;cb28580903fb:33487 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=cb28580903fb%2C33487%2C1731457936586, suffix=, logDir=hdfs://localhost:43979/user/jenkins/test-data/54f258ed-efb1-c29e-82a2-56c5681dca0f/WALs/cb28580903fb,33487,1731457936586, archiveDir=hdfs://localhost:43979/user/jenkins/test-data/54f258ed-efb1-c29e-82a2-56c5681dca0f/oldWALs, maxLogs=32 2024-11-13T00:32:16,961 INFO [RS:0;cb28580903fb:33487 {}] monitor.StreamSlowMonitor(122): New stream slow monitor cb28580903fb%2C33487%2C1731457936586.1731457936960 2024-11-13T00:32:16,969 INFO [RS:0;cb28580903fb:33487 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/54f258ed-efb1-c29e-82a2-56c5681dca0f/WALs/cb28580903fb,33487,1731457936586/cb28580903fb%2C33487%2C1731457936586.1731457936960 2024-11-13T00:32:16,970 DEBUG [RS:0;cb28580903fb:33487 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46597:46597),(127.0.0.1/127.0.0.1:38535:38535)] 2024-11-13T00:32:17,004 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,41233,1731457759323/cb28580903fb%2C41233%2C1731457759323.meta.1731457760430.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-13T00:32:17,004 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/WALs/cb28580903fb,35015,1731457760593/cb28580903fb%2C35015%2C1731457760593.1731457760827 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-13T00:32:17,157 DEBUG [cb28580903fb:38917 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-13T00:32:17,157 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=cb28580903fb,33487,1731457936586 2024-11-13T00:32:17,159 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as cb28580903fb,33487,1731457936586, state=OPENING 2024-11-13T00:32:17,160 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-13T00:32:17,161 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38917-0x1010ebd571f0000, quorum=127.0.0.1:57560, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T00:32:17,161 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33487-0x1010ebd571f0001, quorum=127.0.0.1:57560, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T00:32:17,162 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-13T00:32:17,162 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=cb28580903fb,33487,1731457936586}] 2024-11-13T00:32:17,162 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-13T00:32:17,162 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-13T00:32:17,313 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-13T00:32:17,315 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35721, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-13T00:32:17,318 INFO [RS_OPEN_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-13T00:32:17,318 INFO [RS_OPEN_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-13T00:32:17,320 INFO [RS_OPEN_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=cb28580903fb%2C33487%2C1731457936586.meta, suffix=.meta, logDir=hdfs://localhost:43979/user/jenkins/test-data/54f258ed-efb1-c29e-82a2-56c5681dca0f/WALs/cb28580903fb,33487,1731457936586, archiveDir=hdfs://localhost:43979/user/jenkins/test-data/54f258ed-efb1-c29e-82a2-56c5681dca0f/oldWALs, maxLogs=32 2024-11-13T00:32:17,320 INFO [RS_OPEN_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor cb28580903fb%2C33487%2C1731457936586.meta.1731457937320.meta 2024-11-13T00:32:17,325 INFO 
[RS_OPEN_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/54f258ed-efb1-c29e-82a2-56c5681dca0f/WALs/cb28580903fb,33487,1731457936586/cb28580903fb%2C33487%2C1731457936586.meta.1731457937320.meta 2024-11-13T00:32:17,328 DEBUG [RS_OPEN_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46597:46597),(127.0.0.1/127.0.0.1:38535:38535)] 2024-11-13T00:32:17,332 DEBUG [RS_OPEN_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-13T00:32:17,333 DEBUG [RS_OPEN_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-13T00:32:17,333 DEBUG [RS_OPEN_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-13T00:32:17,333 INFO [RS_OPEN_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-11-13T00:32:17,333 DEBUG [RS_OPEN_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-13T00:32:17,333 DEBUG [RS_OPEN_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-13T00:32:17,333 DEBUG [RS_OPEN_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-13T00:32:17,333 DEBUG [RS_OPEN_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-13T00:32:17,334 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-13T00:32:17,335 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-13T00:32:17,335 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T00:32:17,335 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T00:32:17,335 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-13T00:32:17,336 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-13T00:32:17,336 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T00:32:17,336 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T00:32:17,337 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-13T00:32:17,337 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-13T00:32:17,337 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T00:32:17,337 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T00:32:17,338 INFO [StoreOpener-1588230740-1 {}] 
regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-13T00:32:17,338 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-13T00:32:17,338 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-13T00:32:17,338 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-13T00:32:17,338 DEBUG [RS_OPEN_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-13T00:32:17,339 DEBUG [RS_OPEN_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43979/user/jenkins/test-data/54f258ed-efb1-c29e-82a2-56c5681dca0f/data/hbase/meta/1588230740 2024-11-13T00:32:17,340 DEBUG [RS_OPEN_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43979/user/jenkins/test-data/54f258ed-efb1-c29e-82a2-56c5681dca0f/data/hbase/meta/1588230740 2024-11-13T00:32:17,341 DEBUG [RS_OPEN_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-13T00:32:17,341 DEBUG [RS_OPEN_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-13T00:32:17,341 DEBUG [RS_OPEN_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-11-13T00:32:17,343 DEBUG [RS_OPEN_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-13T00:32:17,343 INFO [RS_OPEN_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=707733, jitterRate=-0.10007175803184509}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-13T00:32:17,343 DEBUG [RS_OPEN_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-13T00:32:17,344 DEBUG [RS_OPEN_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731457937333Writing region info on filesystem at 1731457937333Initializing all the Stores at 1731457937334 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731457937334Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731457937334Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731457937334Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731457937334Cleaning up temporary data from old regions at 1731457937341 (+7 ms)Running coprocessor post-open hooks at 1731457937343 (+2 ms)Region opened successfully at 1731457937344 (+1 ms) 2024-11-13T00:32:17,345 INFO [RS_OPEN_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731457937313 2024-11-13T00:32:17,347 DEBUG [RS_OPEN_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-13T00:32:17,347 INFO [RS_OPEN_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-13T00:32:17,348 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, 
regionLocation=cb28580903fb,33487,1731457936586 2024-11-13T00:32:17,348 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as cb28580903fb,33487,1731457936586, state=OPEN 2024-11-13T00:32:17,354 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38917-0x1010ebd571f0000, quorum=127.0.0.1:57560, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-13T00:32:17,354 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33487-0x1010ebd571f0001, quorum=127.0.0.1:57560, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-13T00:32:17,355 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=cb28580903fb,33487,1731457936586 2024-11-13T00:32:17,355 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-13T00:32:17,355 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-13T00:32:17,357 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-13T00:32:17,357 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=cb28580903fb,33487,1731457936586 in 193 msec 2024-11-13T00:32:17,359 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-13T00:32:17,359 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 602 msec 2024-11-13T00:32:17,360 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-13T00:32:17,360 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-13T00:32:17,361 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-13T00:32:17,361 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=cb28580903fb,33487,1731457936586, seqNum=-1] 2024-11-13T00:32:17,361 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-13T00:32:17,362 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45885, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-13T00:32:17,366 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 641 msec 2024-11-13T00:32:17,367 INFO [master/cb28580903fb:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731457937367, completionTime=-1 2024-11-13T00:32:17,367 INFO 
[master/cb28580903fb:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-13T00:32:17,367 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-13T00:32:17,369 INFO [master/cb28580903fb:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-13T00:32:17,369 INFO [master/cb28580903fb:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731457997369 2024-11-13T00:32:17,369 INFO [master/cb28580903fb:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731458057369 2024-11-13T00:32:17,369 INFO [master/cb28580903fb:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 1 msec 2024-11-13T00:32:17,369 INFO [master/cb28580903fb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=cb28580903fb,38917,1731457936541-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-13T00:32:17,369 INFO [master/cb28580903fb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=cb28580903fb,38917,1731457936541-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-13T00:32:17,369 INFO [master/cb28580903fb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=cb28580903fb,38917,1731457936541-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-13T00:32:17,369 INFO [master/cb28580903fb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-cb28580903fb:38917, period=300000, unit=MILLISECONDS is enabled. 2024-11-13T00:32:17,369 INFO [master/cb28580903fb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-13T00:32:17,369 INFO [master/cb28580903fb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-13T00:32:17,371 DEBUG [master/cb28580903fb:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-13T00:32:17,373 INFO [master/cb28580903fb:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.758sec 2024-11-13T00:32:17,373 INFO [master/cb28580903fb:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-13T00:32:17,373 INFO [master/cb28580903fb:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-13T00:32:17,373 INFO [master/cb28580903fb:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-13T00:32:17,373 INFO [master/cb28580903fb:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
2024-11-13T00:32:17,373 INFO [master/cb28580903fb:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-13T00:32:17,373 INFO [master/cb28580903fb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=cb28580903fb,38917,1731457936541-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-13T00:32:17,373 INFO [master/cb28580903fb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=cb28580903fb,38917,1731457936541-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-13T00:32:17,375 DEBUG [master/cb28580903fb:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-13T00:32:17,375 INFO [master/cb28580903fb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-13T00:32:17,375 INFO [master/cb28580903fb:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=cb28580903fb,38917,1731457936541-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-13T00:32:17,398 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@802d266, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T00:32:17,398 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request cb28580903fb,38917,-1 for getting cluster id 2024-11-13T00:32:17,398 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-13T00:32:17,400 DEBUG [HMaster-EventLoopGroup-16-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '5a4dfb5d-0cc7-4092-b8ed-a8643e90a219' 2024-11-13T00:32:17,400 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-13T00:32:17,400 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "5a4dfb5d-0cc7-4092-b8ed-a8643e90a219" 2024-11-13T00:32:17,400 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@63bb83a5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T00:32:17,400 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [cb28580903fb,38917,-1] 2024-11-13T00:32:17,400 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-13T00:32:17,401 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T00:32:17,401 INFO [HMaster-EventLoopGroup-16-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36442, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-13T00:32:17,402 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@eb34dbf, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-13T00:32:17,402 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-13T00:32:17,403 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=cb28580903fb,33487,1731457936586, seqNum=-1] 2024-11-13T00:32:17,403 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-13T00:32:17,404 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50328, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-13T00:32:17,405 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=cb28580903fb,38917,1731457936541 2024-11-13T00:32:17,405 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-13T00:32:17,408 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-13T00:32:17,408 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-13T00:32:17,410 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=test.com%2C8080%2C1, suffix=, logDir=hdfs://localhost:43979/user/jenkins/test-data/54f258ed-efb1-c29e-82a2-56c5681dca0f/WALs/test.com,8080,1, archiveDir=hdfs://localhost:43979/user/jenkins/test-data/54f258ed-efb1-c29e-82a2-56c5681dca0f/oldWALs, maxLogs=32 2024-11-13T00:32:17,410 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor test.com%2C8080%2C1.1731457937410 2024-11-13T00:32:17,414 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/54f258ed-efb1-c29e-82a2-56c5681dca0f/WALs/test.com,8080,1/test.com%2C8080%2C1.1731457937410 2024-11-13T00:32:17,415 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38535:38535),(127.0.0.1/127.0.0.1:46597:46597)] 2024-11-13T00:32:17,416 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor test.com%2C8080%2C1.1731457937416 2024-11-13T00:32:17,420 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:32:17,420 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:32:17,420 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:32:17,420 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:32:17,420 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:32:17,420 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/54f258ed-efb1-c29e-82a2-56c5681dca0f/WALs/test.com,8080,1/test.com%2C8080%2C1.1731457937410 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/54f258ed-efb1-c29e-82a2-56c5681dca0f/WALs/test.com,8080,1/test.com%2C8080%2C1.1731457937416 2024-11-13T00:32:17,421 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38535:38535),(127.0.0.1/127.0.0.1:46597:46597)] 2024-11-13T00:32:17,421 DEBUG [Time-limited test {}] 
wal.AbstractFSWAL(879): hdfs://localhost:43979/user/jenkins/test-data/54f258ed-efb1-c29e-82a2-56c5681dca0f/WALs/test.com,8080,1/test.com%2C8080%2C1.1731457937410 is not closed yet, will try archiving it next time 2024-11-13T00:32:17,421 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:32:17,422 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38253 is added to blk_1073741835_1011 (size=93) 2024-11-13T00:32:17,422 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:32:17,422 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:32:17,422 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:32:17,422 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:32:17,422 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33285 is added to blk_1073741835_1011 (size=93) 2024-11-13T00:32:17,423 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:43979/user/jenkins/test-data/54f258ed-efb1-c29e-82a2-56c5681dca0f/WALs/test.com,8080,1/test.com%2C8080%2C1.1731457937410 to hdfs://localhost:43979/user/jenkins/test-data/54f258ed-efb1-c29e-82a2-56c5681dca0f/oldWALs/test.com%2C8080%2C1.1731457937410 2024-11-13T00:32:17,424 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38253 is added to blk_1073741836_1012 (size=93) 2024-11-13T00:32:17,424 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33285 is added to blk_1073741836_1012 (size=93) 2024-11-13T00:32:17,426 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/54f258ed-efb1-c29e-82a2-56c5681dca0f/oldWALs 2024-11-13T00:32:17,426 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog test.com%2C8080%2C1:(num 1731457937416) 2024-11-13T00:32:17,426 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-13T00:32:17,426 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
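The WALFactory and AbstractFSWAL lines above show an FSHLog-backed WAL sized at blocksize=256 MB, rollsize=128 MB and maxLogs=32 being created, rolled and archived to oldWALs. The sketch below is illustrative only and assumes the standard WAL sizing keys; note that rollsize is not set directly but derives from the block size multiplied by hbase.regionserver.logroll.multiplier.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class WalSizingSketch {
  // Reproduces the WAL sizing reported by AbstractFSWAL(613) above:
  // blocksize=256 MB, rollsize=128 MB, maxLogs=32, FSHLog-based provider.
  public static Configuration walSizing() {
    Configuration conf = HBaseConfiguration.create();
    conf.set("hbase.wal.provider", "filesystem");                 // selects FSHLogProvider
    conf.setLong("hbase.regionserver.hlog.blocksize", 256L * 1024 * 1024);
    conf.setFloat("hbase.regionserver.logroll.multiplier", 0.5f); // rollsize = 256 MB * 0.5
    conf.setInt("hbase.regionserver.maxlogs", 32);                // un-archived WAL file bound
    return conf;
  }
}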
2024-11-13T00:32:17,426 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-13T00:32:17,426 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T00:32:17,426 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T00:32:17,426 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-13T00:32:17,426 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-13T00:32:17,426 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=770739577, stopped=false 2024-11-13T00:32:17,426 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=cb28580903fb,38917,1731457936541 2024-11-13T00:32:17,428 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38917-0x1010ebd571f0000, quorum=127.0.0.1:57560, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-13T00:32:17,428 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33487-0x1010ebd571f0001, quorum=127.0.0.1:57560, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-13T00:32:17,428 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38917-0x1010ebd571f0000, quorum=127.0.0.1:57560, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T00:32:17,428 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33487-0x1010ebd571f0001, quorum=127.0.0.1:57560, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T00:32:17,428 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-13T00:32:17,428 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-13T00:32:17,428 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at 
org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-13T00:32:17,428 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T00:32:17,428 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'cb28580903fb,33487,1731457936586' ***** 2024-11-13T00:32:17,428 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-13T00:32:17,428 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:33487-0x1010ebd571f0001, quorum=127.0.0.1:57560, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-13T00:32:17,428 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:38917-0x1010ebd571f0000, quorum=127.0.0.1:57560, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-13T00:32:17,429 INFO [RS:0;cb28580903fb:33487 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-13T00:32:17,429 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-13T00:32:17,429 INFO [RS:0;cb28580903fb:33487 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-13T00:32:17,429 INFO [RS:0;cb28580903fb:33487 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-13T00:32:17,429 INFO [RS:0;cb28580903fb:33487 {}] regionserver.HRegionServer(959): stopping server cb28580903fb,33487,1731457936586 2024-11-13T00:32:17,429 INFO [RS:0;cb28580903fb:33487 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-13T00:32:17,429 INFO [RS:0;cb28580903fb:33487 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;cb28580903fb:33487. 
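The call stacks above show AbstractTestLogRolling.tearDown driving HBaseTestingUtil.shutdownMiniCluster, which in turn produces the region server and master shutdown that fills the remainder of this log. For orientation only, here is a minimal JUnit 4 sketch of that minicluster lifecycle; the class name, method bodies and the no-arg HBaseTestingUtil constructor are assumptions for the example, not the actual log-rolling test.

import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;

public class MiniClusterLifecycleSketch {
  // Assumed no-arg constructor; the class itself is the one named in the
  // shutdown call stacks above.
  private final HBaseTestingUtil testUtil = new HBaseTestingUtil();

  @Before
  public void setUp() throws Exception {
    // Brings up the in-process HDFS, ZooKeeper, master and region server
    // whose startup is recorded earlier in this log.
    testUtil.startMiniCluster();
  }

  @Test
  public void placeholder() {
    // The real test (WAL rolling) would go here; omitted in this sketch.
  }

  @After
  public void tearDown() throws Exception {
    // Mirrors the path in the call stacks above:
    // shutdownMiniCluster -> shutdownMiniHBaseCluster -> HMaster.shutdown.
    testUtil.shutdownMiniCluster();
  }
}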
2024-11-13T00:32:17,429 DEBUG [RS:0;cb28580903fb:33487 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-13T00:32:17,429 DEBUG [RS:0;cb28580903fb:33487 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T00:32:17,429 INFO [RS:0;cb28580903fb:33487 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-13T00:32:17,429 INFO [RS:0;cb28580903fb:33487 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-13T00:32:17,429 INFO [RS:0;cb28580903fb:33487 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-11-13T00:32:17,429 INFO [RS:0;cb28580903fb:33487 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-13T00:32:17,429 INFO [RS:0;cb28580903fb:33487 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-13T00:32:17,429 DEBUG [RS:0;cb28580903fb:33487 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-11-13T00:32:17,429 DEBUG [RS:0;cb28580903fb:33487 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-13T00:32:17,429 DEBUG [RS_CLOSE_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-13T00:32:17,430 INFO [RS_CLOSE_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-13T00:32:17,430 DEBUG [RS_CLOSE_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-13T00:32:17,430 DEBUG [RS_CLOSE_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-13T00:32:17,430 DEBUG [RS_CLOSE_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-13T00:32:17,430 INFO [RS_CLOSE_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=74 B heapSize=1.22 KB 2024-11-13T00:32:17,444 DEBUG [RS_CLOSE_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43979/user/jenkins/test-data/54f258ed-efb1-c29e-82a2-56c5681dca0f/data/hbase/meta/1588230740/.tmp/ns/2764612973064f059bb127bd57e80416 is 43, key is default/ns:d/1731457937363/Put/seqid=0 2024-11-13T00:32:17,448 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33285 is added to blk_1073741837_1013 (size=5153) 2024-11-13T00:32:17,448 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38253 is added to blk_1073741837_1013 (size=5153) 2024-11-13T00:32:17,449 INFO [RS_CLOSE_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:43979/user/jenkins/test-data/54f258ed-efb1-c29e-82a2-56c5681dca0f/data/hbase/meta/1588230740/.tmp/ns/2764612973064f059bb127bd57e80416 2024-11-13T00:32:17,453 DEBUG [RS_CLOSE_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43979/user/jenkins/test-data/54f258ed-efb1-c29e-82a2-56c5681dca0f/data/hbase/meta/1588230740/.tmp/ns/2764612973064f059bb127bd57e80416 as hdfs://localhost:43979/user/jenkins/test-data/54f258ed-efb1-c29e-82a2-56c5681dca0f/data/hbase/meta/1588230740/ns/2764612973064f059bb127bd57e80416 2024-11-13T00:32:17,457 INFO [RS_CLOSE_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43979/user/jenkins/test-data/54f258ed-efb1-c29e-82a2-56c5681dca0f/data/hbase/meta/1588230740/ns/2764612973064f059bb127bd57e80416, entries=2, sequenceid=6, filesize=5.0 K 2024-11-13T00:32:17,458 INFO 
[RS_CLOSE_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 28ms, sequenceid=6, compaction requested=false 2024-11-13T00:32:17,461 DEBUG [RS_CLOSE_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43979/user/jenkins/test-data/54f258ed-efb1-c29e-82a2-56c5681dca0f/data/hbase/meta/1588230740/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-13T00:32:17,462 DEBUG [RS_CLOSE_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-13T00:32:17,462 INFO [RS_CLOSE_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-13T00:32:17,462 DEBUG [RS_CLOSE_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731457937429Running coprocessor pre-close hooks at 1731457937429Disabling compacts and flushes for region at 1731457937429Disabling writes for close at 1731457937430 (+1 ms)Obtaining lock to block concurrent updates at 1731457937430Preparing flush snapshotting stores in 1588230740 at 1731457937430Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=74, getHeapSize=1184, getOffHeapSize=0, getCellsCount=2 at 1731457937430Flushing stores of hbase:meta,,1.1588230740 at 1731457937431 (+1 ms)Flushing 1588230740/ns: creating writer at 1731457937431Flushing 1588230740/ns: appending metadata at 1731457937444 (+13 ms)Flushing 1588230740/ns: closing flushed file at 1731457937444Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@73212b7: reopening flushed file at 1731457937453 (+9 ms)Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 28ms, sequenceid=6, compaction requested=false at 1731457937458 (+5 ms)Writing region close event to WAL at 1731457937459 (+1 ms)Running coprocessor post-close hooks at 1731457937462 (+3 ms)Closed at 1731457937462 2024-11-13T00:32:17,462 DEBUG [RS_CLOSE_META-regionserver/cb28580903fb:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-13T00:32:17,630 INFO [RS:0;cb28580903fb:33487 {}] regionserver.HRegionServer(976): stopping server cb28580903fb,33487,1731457936586; all regions closed. 
2024-11-13T00:32:17,630 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:32:17,630 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:32:17,630 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:32:17,630 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:32:17,630 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:32:17,632 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33285 is added to blk_1073741834_1010 (size=1152) 2024-11-13T00:32:17,633 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38253 is added to blk_1073741834_1010 (size=1152) 2024-11-13T00:32:17,635 DEBUG [RS:0;cb28580903fb:33487 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/54f258ed-efb1-c29e-82a2-56c5681dca0f/oldWALs 2024-11-13T00:32:17,635 INFO [RS:0;cb28580903fb:33487 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog cb28580903fb%2C33487%2C1731457936586.meta:.meta(num 1731457937320) 2024-11-13T00:32:17,635 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:32:17,635 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:32:17,635 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:32:17,635 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:32:17,635 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-13T00:32:17,637 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33285 is added to blk_1073741833_1009 (size=93) 2024-11-13T00:32:17,637 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38253 is added to blk_1073741833_1009 (size=93) 2024-11-13T00:32:17,639 DEBUG [RS:0;cb28580903fb:33487 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/54f258ed-efb1-c29e-82a2-56c5681dca0f/oldWALs 2024-11-13T00:32:17,639 INFO [RS:0;cb28580903fb:33487 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog cb28580903fb%2C33487%2C1731457936586:(num 1731457936960) 2024-11-13T00:32:17,639 DEBUG [RS:0;cb28580903fb:33487 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-13T00:32:17,639 INFO [RS:0;cb28580903fb:33487 {}] regionserver.LeaseManager(133): Closed leases 2024-11-13T00:32:17,639 INFO [RS:0;cb28580903fb:33487 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-13T00:32:17,639 INFO [RS:0;cb28580903fb:33487 {}] hbase.ChoreService(370): Chore service for: regionserver/cb28580903fb:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-13T00:32:17,639 INFO [RS:0;cb28580903fb:33487 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-13T00:32:17,639 INFO [regionserver/cb28580903fb:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-13T00:32:17,639 INFO [RS:0;cb28580903fb:33487 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:33487 2024-11-13T00:32:17,642 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33487-0x1010ebd571f0001, quorum=127.0.0.1:57560, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/cb28580903fb,33487,1731457936586 2024-11-13T00:32:17,642 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38917-0x1010ebd571f0000, quorum=127.0.0.1:57560, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-13T00:32:17,642 INFO [RS:0;cb28580903fb:33487 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-13T00:32:17,643 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [cb28580903fb,33487,1731457936586] 2024-11-13T00:32:17,650 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/cb28580903fb,33487,1731457936586 already deleted, retry=false 2024-11-13T00:32:17,650 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; cb28580903fb,33487,1731457936586 expired; onlineServers=0 2024-11-13T00:32:17,650 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'cb28580903fb,38917,1731457936541' ***** 2024-11-13T00:32:17,650 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-13T00:32:17,650 INFO [M:0;cb28580903fb:38917 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-13T00:32:17,650 INFO [M:0;cb28580903fb:38917 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-13T00:32:17,650 DEBUG [M:0;cb28580903fb:38917 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-13T00:32:17,651 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-13T00:32:17,651 DEBUG [M:0;cb28580903fb:38917 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-13T00:32:17,651 DEBUG [master/cb28580903fb:0:becomeActiveMaster-HFileCleaner.small.0-1731457936729 {}] cleaner.HFileCleaner(306): Exit Thread[master/cb28580903fb:0:becomeActiveMaster-HFileCleaner.small.0-1731457936729,5,FailOnTimeoutGroup] 2024-11-13T00:32:17,651 DEBUG [master/cb28580903fb:0:becomeActiveMaster-HFileCleaner.large.0-1731457936729 {}] cleaner.HFileCleaner(306): Exit Thread[master/cb28580903fb:0:becomeActiveMaster-HFileCleaner.large.0-1731457936729,5,FailOnTimeoutGroup] 2024-11-13T00:32:17,651 INFO [M:0;cb28580903fb:38917 {}] hbase.ChoreService(370): Chore service for: master/cb28580903fb:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-13T00:32:17,651 INFO [M:0;cb28580903fb:38917 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-13T00:32:17,651 DEBUG [M:0;cb28580903fb:38917 {}] master.HMaster(1795): Stopping service threads 2024-11-13T00:32:17,651 INFO [M:0;cb28580903fb:38917 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-13T00:32:17,651 INFO [M:0;cb28580903fb:38917 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-13T00:32:17,651 INFO [M:0;cb28580903fb:38917 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-13T00:32:17,651 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-13T00:32:17,652 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38917-0x1010ebd571f0000, quorum=127.0.0.1:57560, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-13T00:32:17,652 DEBUG [M:0;cb28580903fb:38917 {}] zookeeper.ZKUtil(347): master:38917-0x1010ebd571f0000, quorum=127.0.0.1:57560, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-13T00:32:17,652 WARN [M:0;cb28580903fb:38917 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-13T00:32:17,652 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38917-0x1010ebd571f0000, quorum=127.0.0.1:57560, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-13T00:32:17,653 INFO [M:0;cb28580903fb:38917 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:43979/user/jenkins/test-data/54f258ed-efb1-c29e-82a2-56c5681dca0f/.lastflushedseqids 2024-11-13T00:32:17,657 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38253 is added to blk_1073741838_1014 (size=99) 2024-11-13T00:32:17,658 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33285 is added to blk_1073741838_1014 (size=99) 2024-11-13T00:32:17,658 INFO [M:0;cb28580903fb:38917 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-13T00:32:17,658 INFO [M:0;cb28580903fb:38917 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-13T00:32:17,658 DEBUG [M:0;cb28580903fb:38917 {}] regionserver.HRegion(1722): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-13T00:32:17,658 INFO [M:0;cb28580903fb:38917 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-13T00:32:17,658 DEBUG [M:0;cb28580903fb:38917 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-13T00:32:17,658 DEBUG [M:0;cb28580903fb:38917 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-13T00:32:17,658 DEBUG [M:0;cb28580903fb:38917 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-13T00:32:17,659 INFO [M:0;cb28580903fb:38917 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=7.67 KB heapSize=11.34 KB 2024-11-13T00:32:17,674 DEBUG [M:0;cb28580903fb:38917 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43979/user/jenkins/test-data/54f258ed-efb1-c29e-82a2-56c5681dca0f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/240c3b13c72041e68520152b727f8faa is 82, key is hbase:meta,,1/info:regioninfo/1731457937347/Put/seqid=0 2024-11-13T00:32:17,678 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38253 is added to blk_1073741839_1015 (size=5672) 2024-11-13T00:32:17,678 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33285 is added to blk_1073741839_1015 (size=5672) 2024-11-13T00:32:17,679 INFO [M:0;cb28580903fb:38917 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:43979/user/jenkins/test-data/54f258ed-efb1-c29e-82a2-56c5681dca0f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/240c3b13c72041e68520152b727f8faa 2024-11-13T00:32:17,696 DEBUG [M:0;cb28580903fb:38917 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43979/user/jenkins/test-data/54f258ed-efb1-c29e-82a2-56c5681dca0f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/ec9df1758c7a467c962a447de585789e is 240, key is \x00\x00\x00\x00\x00\x00\x00\x01/proc:d/1731457937366/Put/seqid=0 2024-11-13T00:32:17,699 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38253 is added to blk_1073741840_1016 (size=5275) 2024-11-13T00:32:17,700 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33285 is added to blk_1073741840_1016 (size=5275) 2024-11-13T00:32:17,700 INFO [M:0;cb28580903fb:38917 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.06 KB at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:43979/user/jenkins/test-data/54f258ed-efb1-c29e-82a2-56c5681dca0f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/ec9df1758c7a467c962a447de585789e 2024-11-13T00:32:17,717 DEBUG [M:0;cb28580903fb:38917 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43979/user/jenkins/test-data/54f258ed-efb1-c29e-82a2-56c5681dca0f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/0eb41657e266421f96309c6a8483079c is 69, key is cb28580903fb,33487,1731457936586/rs:state/1731457936818/Put/seqid=0 2024-11-13T00:32:17,721 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33285 is added to blk_1073741841_1017 (size=5156) 2024-11-13T00:32:17,721 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38253 is added to blk_1073741841_1017 (size=5156) 2024-11-13T00:32:17,722 INFO [M:0;cb28580903fb:38917 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:43979/user/jenkins/test-data/54f258ed-efb1-c29e-82a2-56c5681dca0f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/0eb41657e266421f96309c6a8483079c 2024-11-13T00:32:17,738 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:34283/user/jenkins/test-data/72a35236-0094-0bc8-1656-863367a24e79/MasterData/WALs/cb28580903fb,32917,1731457759225/cb28580903fb%2C32917%2C1731457759225.1731457759520 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more
2024-11-13T00:32:17,739 DEBUG [M:0;cb28580903fb:38917 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43979/user/jenkins/test-data/54f258ed-efb1-c29e-82a2-56c5681dca0f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/9b8f137a7c634460bb0f3b91fab11eff is 52, key is load_balancer_on/state:d/1731457937407/Put/seqid=0
2024-11-13T00:32:17,743 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33285 is added to blk_1073741842_1018 (size=5056)
2024-11-13T00:32:17,743 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38253 is added to blk_1073741842_1018 (size=5056)
2024-11-13T00:32:17,743 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33487-0x1010ebd571f0001, quorum=127.0.0.1:57560, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-13T00:32:17,743 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33487-0x1010ebd571f0001, quorum=127.0.0.1:57560, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-13T00:32:17,743 INFO [RS:0;cb28580903fb:33487 {}] hbase.HBaseServerBase(486): Close table descriptors
2024-11-13T00:32:17,743 INFO [RS:0;cb28580903fb:33487 {}] regionserver.HRegionServer(1031): Exiting; stopping=cb28580903fb,33487,1731457936586; zookeeper connection closed.
2024-11-13T00:32:17,743 INFO [M:0;cb28580903fb:38917 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:43979/user/jenkins/test-data/54f258ed-efb1-c29e-82a2-56c5681dca0f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/9b8f137a7c634460bb0f3b91fab11eff
2024-11-13T00:32:17,743 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@78e4e14b {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@78e4e14b
2024-11-13T00:32:17,744 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete
2024-11-13T00:32:17,747 DEBUG [M:0;cb28580903fb:38917 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43979/user/jenkins/test-data/54f258ed-efb1-c29e-82a2-56c5681dca0f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/240c3b13c72041e68520152b727f8faa as hdfs://localhost:43979/user/jenkins/test-data/54f258ed-efb1-c29e-82a2-56c5681dca0f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/240c3b13c72041e68520152b727f8faa
2024-11-13T00:32:17,751 INFO [M:0;cb28580903fb:38917 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43979/user/jenkins/test-data/54f258ed-efb1-c29e-82a2-56c5681dca0f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/240c3b13c72041e68520152b727f8faa, entries=8, sequenceid=29, filesize=5.5 K
2024-11-13T00:32:17,752 DEBUG [M:0;cb28580903fb:38917 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43979/user/jenkins/test-data/54f258ed-efb1-c29e-82a2-56c5681dca0f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/ec9df1758c7a467c962a447de585789e as hdfs://localhost:43979/user/jenkins/test-data/54f258ed-efb1-c29e-82a2-56c5681dca0f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/ec9df1758c7a467c962a447de585789e
2024-11-13T00:32:17,755 INFO [M:0;cb28580903fb:38917 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43979/user/jenkins/test-data/54f258ed-efb1-c29e-82a2-56c5681dca0f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/ec9df1758c7a467c962a447de585789e, entries=3, sequenceid=29, filesize=5.2 K
2024-11-13T00:32:17,756 DEBUG [M:0;cb28580903fb:38917 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43979/user/jenkins/test-data/54f258ed-efb1-c29e-82a2-56c5681dca0f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/0eb41657e266421f96309c6a8483079c as hdfs://localhost:43979/user/jenkins/test-data/54f258ed-efb1-c29e-82a2-56c5681dca0f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/0eb41657e266421f96309c6a8483079c
2024-11-13T00:32:17,759 INFO [M:0;cb28580903fb:38917 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43979/user/jenkins/test-data/54f258ed-efb1-c29e-82a2-56c5681dca0f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/0eb41657e266421f96309c6a8483079c, entries=1, sequenceid=29, filesize=5.0 K
2024-11-13T00:32:17,760 DEBUG [M:0;cb28580903fb:38917 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43979/user/jenkins/test-data/54f258ed-efb1-c29e-82a2-56c5681dca0f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/9b8f137a7c634460bb0f3b91fab11eff as hdfs://localhost:43979/user/jenkins/test-data/54f258ed-efb1-c29e-82a2-56c5681dca0f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/9b8f137a7c634460bb0f3b91fab11eff
2024-11-13T00:32:17,763 INFO [M:0;cb28580903fb:38917 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43979/user/jenkins/test-data/54f258ed-efb1-c29e-82a2-56c5681dca0f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/9b8f137a7c634460bb0f3b91fab11eff, entries=1, sequenceid=29, filesize=4.9 K
2024-11-13T00:32:17,764 INFO [M:0;cb28580903fb:38917 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 106ms, sequenceid=29, compaction requested=false
2024-11-13T00:32:17,766 INFO [M:0;cb28580903fb:38917 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-13T00:32:17,766 DEBUG [M:0;cb28580903fb:38917 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731457937658Disabling compacts and flushes for region at 1731457937658Disabling writes for close at 1731457937658Obtaining lock to block concurrent updates at 1731457937659 (+1 ms)Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731457937659Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=7850, getHeapSize=11544, getOffHeapSize=0, getCellsCount=36 at 1731457937659Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1731457937660 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731457937660Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731457937673 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731457937674 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731457937682 (+8 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731457937695 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731457937695Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731457937704 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731457937717 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731457937717Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731457937726 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731457937739 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731457937739Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1e872c71: reopening flushed file at 1731457937747 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@64266647: reopening flushed file at 1731457937751 (+4 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4401bf2b: reopening flushed file at 1731457937755 (+4 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@13db0fd3: reopening flushed file at 1731457937759 (+4 ms)Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 106ms, sequenceid=29, compaction requested=false at 1731457937764 (+5 ms)Writing region close event to WAL at 1731457937765 (+1 ms)Closed at 1731457937765
2024-11-13T00:32:17,766 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-13T00:32:17,766 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-13T00:32:17,766 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-13T00:32:17,766 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-13T00:32:17,766 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-13T00:32:17,768 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38253 is added to blk_1073741830_1006 (size=10311)
2024-11-13T00:32:17,768 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33285 is added to blk_1073741830_1006 (size=10311)
2024-11-13T00:32:17,768 INFO [M:0;cb28580903fb:38917 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down.
2024-11-13T00:32:17,769 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting.
2024-11-13T00:32:17,769 INFO [M:0;cb28580903fb:38917 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:38917
2024-11-13T00:32:17,769 INFO [M:0;cb28580903fb:38917 {}] hbase.HBaseServerBase(479): Close zookeeper
2024-11-13T00:32:17,871 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38917-0x1010ebd571f0000, quorum=127.0.0.1:57560, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-13T00:32:17,871 INFO [M:0;cb28580903fb:38917 {}] hbase.HBaseServerBase(486): Close table descriptors
2024-11-13T00:32:17,871 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38917-0x1010ebd571f0000, quorum=127.0.0.1:57560, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-13T00:32:17,873 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@23bea8a3{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-13T00:32:17,874 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@36271130{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-13T00:32:17,874 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-13T00:32:17,874 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@189ac7ed{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-13T00:32:17,874 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5f138979{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/02f7f4c2-a936-fd05-68fb-73b3cabee09d/hadoop.log.dir/,STOPPED}
2024-11-13T00:32:17,875 WARN [BP-1032376881-172.17.0.2-1731457935904 heartbeating to localhost/127.0.0.1:43979 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-11-13T00:32:17,875 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-11-13T00:32:17,875 WARN [BP-1032376881-172.17.0.2-1731457935904 heartbeating to localhost/127.0.0.1:43979 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1032376881-172.17.0.2-1731457935904 (Datanode Uuid 90873097-09d3-49a3-9f47-8fabd116f227) service to localhost/127.0.0.1:43979
2024-11-13T00:32:17,875 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-11-13T00:32:17,876 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/02f7f4c2-a936-fd05-68fb-73b3cabee09d/cluster_adafe1e2-df5a-bfb0-580c-ccd8a855f81a/data/data3/current/BP-1032376881-172.17.0.2-1731457935904 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-13T00:32:17,876 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/02f7f4c2-a936-fd05-68fb-73b3cabee09d/cluster_adafe1e2-df5a-bfb0-580c-ccd8a855f81a/data/data4/current/BP-1032376881-172.17.0.2-1731457935904 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-13T00:32:17,876 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-11-13T00:32:17,878 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3cc2275a{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-13T00:32:17,878 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@280c54a4{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-13T00:32:17,878 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-13T00:32:17,878 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2c219139{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-13T00:32:17,878 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@dc5c84{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/02f7f4c2-a936-fd05-68fb-73b3cabee09d/hadoop.log.dir/,STOPPED}
2024-11-13T00:32:17,879 WARN [BP-1032376881-172.17.0.2-1731457935904 heartbeating to localhost/127.0.0.1:43979 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-11-13T00:32:17,879 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-11-13T00:32:17,879 WARN [BP-1032376881-172.17.0.2-1731457935904 heartbeating to localhost/127.0.0.1:43979 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1032376881-172.17.0.2-1731457935904 (Datanode Uuid 9569a5a5-c5ad-4ce1-b3ab-06991b3ea235) service to localhost/127.0.0.1:43979
2024-11-13T00:32:17,879 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-11-13T00:32:17,880 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/02f7f4c2-a936-fd05-68fb-73b3cabee09d/cluster_adafe1e2-df5a-bfb0-580c-ccd8a855f81a/data/data1/current/BP-1032376881-172.17.0.2-1731457935904 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-13T00:32:17,880 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/02f7f4c2-a936-fd05-68fb-73b3cabee09d/cluster_adafe1e2-df5a-bfb0-580c-ccd8a855f81a/data/data2/current/BP-1032376881-172.17.0.2-1731457935904 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-13T00:32:17,880 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-11-13T00:32:17,886 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@6ef093cd{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-11-13T00:32:17,886 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3636d357{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-13T00:32:17,886 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-13T00:32:17,886 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1a3de627{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-13T00:32:17,886 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6e10390{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/02f7f4c2-a936-fd05-68fb-73b3cabee09d/hadoop.log.dir/,STOPPED}
2024-11-13T00:32:17,892 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers
2024-11-13T00:32:17,906 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down
2024-11-13T00:32:17,915 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnNothingWritten Thread=272 (was 233)
Potentially hanging thread: HMaster-EventLoopGroup-16-3
  app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
  app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
  app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
  app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
  app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
  app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
  app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
  app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
  java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: HMaster-EventLoopGroup-16-1
  app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
  app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
  app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
  app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
  app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
  app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
  app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
  app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
  java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: LeaseRenewer:jenkins@localhost:43979
  java.base@17.0.11/java.lang.Thread.sleep(Native Method)
  app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441)
  app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77)
  app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336)
  java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-42-2
  java.base@17.0.11/java.lang.Thread.sleep(Native Method)
  app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
  app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
  app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
  app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
  app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
  java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:43979
  java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
  java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
  java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704)
  java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
  app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121)
  java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-42-1
  java.base@17.0.11/java.lang.Thread.sleep(Native Method)
  app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
  app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
  app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
  app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
  app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
  java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:43979
  java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
  java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
  java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704)
  java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
  app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121)
  java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: HMaster-EventLoopGroup-16-2
  app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
  app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
  app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
  app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
  app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
  app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
  app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
  app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
  java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: globalEventExecutor-1-20
  java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
  java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
  java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
  java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
  app//io.netty.util.concurrent.GlobalEventExecutor.takeTask(GlobalEventExecutor.java:113)
  app//io.netty.util.concurrent.GlobalEventExecutor$TaskRunner.run(GlobalEventExecutor.java:259)
  app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
  app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
  java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-2
  app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
  app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
  app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
  app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
  app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
  app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
  app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
  app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
  java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-44-1
  java.base@17.0.11/java.lang.Thread.sleep(Native Method)
  app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
  app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
  app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
  app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
  app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
  java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-45-2
  java.base@17.0.11/java.lang.Thread.sleep(Native Method)
  app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
  app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
  app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
  app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
  app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
  java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Client (599498808) connection to localhost/127.0.0.1:43979 from jenkins.hfs.7
  java.base@17.0.11/java.lang.Object.wait(Native Method)
  app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042)
  app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)
Potentially hanging thread: nioEventLoopGroup-44-3
  java.base@17.0.11/java.lang.Thread.sleep(Native Method)
  app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
  app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
  app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
  app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
  app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
  java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-42-3
  java.base@17.0.11/java.lang.Thread.sleep(Native Method)
  app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
  app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
  app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
  app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
  app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
  java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Client (599498808) connection to localhost/127.0.0.1:43979 from jenkins
  java.base@17.0.11/java.lang.Object.wait(Native Method)
  app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042)
  app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)
Potentially hanging thread: IPC Client (599498808) connection to localhost/127.0.0.1:43979 from jenkins
  java.base@17.0.11/java.lang.Object.wait(Native Method)
  app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042)
  app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)
Potentially hanging thread: nioEventLoopGroup-44-2
  java.base@17.0.11/java.lang.Thread.sleep(Native Method)
  app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
  app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
  app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
  app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
  app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
  java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-45-1
  java.base@17.0.11/java.lang.Thread.sleep(Native Method)
  app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
  app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
  app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
  app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
  app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
  java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-43-1
  java.base@17.0.11/java.lang.Thread.sleep(Native Method)
  app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
  app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
  app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
  app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
  app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
  java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-45-3
  java.base@17.0.11/java.lang.Thread.sleep(Native Method)
  app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
  app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
  app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
  app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
  app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
  java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-43-3
  java.base@17.0.11/java.lang.Thread.sleep(Native Method)
  app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
  app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
  app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
  app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
  app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
  java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: LeaseRenewer:jenkins.hfs.7@localhost:43979
  java.base@17.0.11/java.lang.Thread.sleep(Native Method)
  app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441)
  app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77)
  app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336)
  java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-1
  app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
  app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
  app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
  app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
  app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
  app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
  app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
  app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
  java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-43-2
  java.base@17.0.11/java.lang.Thread.sleep(Native Method)
  app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
  app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
  app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
  app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
  app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
  java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:43979
  java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
  java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
  java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704)
  java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
  app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121)
  java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-3
  app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
  app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
  app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
  app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
  app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
  app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
  app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
  app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
  java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
- Thread LEAK? -, OpenFileDescriptor=538 (was 515) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=61 (was 61), ProcessCount=11 (was 11), AvailableMemoryMB=1185 (was 1190)